Mirror of https://github.com/actions/actions-runner-controller.git, synced 2025-12-10 11:41:27 +00:00.

Compare commits (203 commits)
Commits (SHA1), in the order listed:

8566a4f453, 3366dc9a63, fa94799ec8, c424d1afee, 99f83a9bf0, aa7d4c5ecc, 552ee28072, fa77facacd,
5b28f3d964, c36748b8bc, f16f5b0aa4, c889b92f45, 46be20976a, 8c42f99d0b, a93fd21f21, 7523ea44f1,
30ab0c0b71, a72f190ef6, cb60c1ec3b, e108e04dda, 2e083bca28, 198b13324d, 605dae3995, d2b0920454,
2cbeca0e7c, 859e04a680, c0821d4ede, c3a6e45920, 818dfd6515, 726b39aedd, 7638c21e92, c09d6075c6,
39d37a7d28, de0315380d, 906ddacbc6, c388446668, d56971ca7c, cb14d7530b, fbb24c8c0a, 0b88b246d3,
a4631f345b, 7be31ce3e5, 57a7b8076f, 5309b1c02c, ae09e6ebb7, 3cd124dce3, 25f5817a5e, 0510f19607,
9d961c58ff, ab25907050, 6cbba80df1, 082245c5db, a82e020daa, c8c2d44a5c, 4e7b8b57c0, e7020c7c0f,
cb54864387, 0e0f385f72, b3cae25741, 469b117a09, 5f59734078, e00b3b9714, 588872a316, a0feee257f,
a18ac330bb, 0901456320, dbd7b486d2, 7e766282aa, ba175148c8, 358146ee54, e9dd16b023, 1ba4098648,
05fb8569b3, db45a375d0, 81dd47a893, 6b77a2a5a8, dc4cf3f57b, d810b579a5, 47c8de9dc3, 74a53bde5e,
aad2615487, 03d9b6a09f, 5d280cc8c8, 133c4fb21e, 3b2d2c052e, 37c2a62fa8, 2eeb56d1c8, a612b38f9b,
1c67ea65d9, c26fb5ad5f, 325c2cc385, 2e551c9d0a, 7b44454d01, f2680b2f2d, b42b8406a2, 3c125e2191,
9ed245c85e, 5b7807d54b, 156e2c1987, da4dfb3fdf, 0783ffe989, 374105c1f3, bc6e499e4f, 07f822bb08,
3a0332dfdc, f6ab66c55b, d874a5cfda, c424215044, c5fdfd63db, 23a45eaf87, dee997b44e, 2929a739e3,
3cccca8d09, 7a7086e7aa, 565b14a148, ecc441de3f, 25335bb3c3, 9b871567b1, 264cf494e3, 3f23501b8e,
5530030c67, 8d3a83b07a, a6270b44d5, 2273b198a1, 3d62e73f8c, f5c639ae28, 81016154c0, 728829be7b,
c0b8f9d483, ced1c2321a, 1b8a656051, 1753fa3530, 8c0f3dfc79, dbda292f54, 550a864198, 4fa5315311,
11e58fcc41, f220fefe92, 56b4598d1d, 8f977dbe48, 9ae3551744, 05ad3f5469, 9c7372a8e0, 584590e97c,
d18884a0b9, f987571b64, 450e384c4c, e9eef04993, 598dd1d9fe, 9890a90e69, 9da123ae5e, 4d4137aa28,
022007078e, 31e5e61155, 1d1453c5f2, e44e53b88e, 398791241e, 991535e567, 2d7fbbfb68, dd0b9f3e95,
7cb2bc84c8, b0e74bebab, dfbe53dcca, ebc3970b84, 1ddcf6946a, cfbaad38c8, 67f6de010b, 2db608879a,
2c4a6ca90b, 829bf20449, be13322816, 7f4a76a39b, 0fce761686, c88ff44518, 2fdf35ac9d, 6cce3fefc5,
eb2eaf8130, 7bf712d0d4, 7d024a6c05, 434823bcb3, 35d047db01, f1db6af1c5, 4f3f2fb60d, 2623140c9a,
1db9d9d574, d046350240, cca4d249e9, bc8bc70f69, 34c6c3d9cd, 9c8d7305f1, addcbfa7ee, bbb036e732,
9301409aec, ab1c39de57, a4350d0fc2, 2146c62c9e, 28e80a2d28, 831db9ee2a, 4d69e0806e, d37cd69e9b,
a2690aa5cb, da020df0fd, 6c64ae6a01
.dockerignore (new file, 12 lines)
@@ -0,0 +1,12 @@
+Makefile
+acceptance
+runner
+hack
+test-assets
+config
+charts
+.github
+.envrc
+*.md
+*.txt
+*.sh

.github/ISSUE_TEMPLATE/bug_report.md (new file, 36 lines)
@@ -0,0 +1,36 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**Checks**
+
+- [ ] My actions-runner-controller version (v0.x.y) does support the feature
+- [ ] I'm using an unreleased version of the controller I built from HEAD of the default branch
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Environment (please complete the following information):**
+- Controller Version [e.g. 0.18.2]
+- Deployment Method [e.g. Helm and Kustomize]
+- Helm Chart Version [e.g. 0.11.0, if applicable]
+
+**Additional context**
+Add any other context about the problem here.

.github/ISSUE_TEMPLATE/feature_request.md (new file, 19 lines)
@@ -0,0 +1,19 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.

.github/RELEASE_NOTE_TEMPLATE.md (new file, 34 lines)
@@ -0,0 +1,34 @@
+# Release Note Template
+
+This is the template of actions-runner-controller's release notes.
+
+Whenever a new release is made, I start by manually copy-pasting this template onto the GitHub UI for creating the release.
+
+I then walk through all the changes, take some time to think about the best one-sentence explanations to tell the users about the changes, write it all,
+and click the publish button.
+
+If you think you can improve future release notes in any way, please do submit a pull request to change the template below.
+
+Note that even though it looks like a Go template, I don't use any templating to generate the changelog.
+It's just that I'm used to reading and interpreting Go templates myself, not a computer program :)
+
+**Title**:
+
+```
+v{{ .Version }}: {{ .TitlesOfImportantChanges }}
+```
+
+**Body**:
+
+```
+**CAUTION:** If you're using the Helm chart, beware to review changes to CRDs and do manually upgrade CRDs! Helm installs CRDs only on installing a chart. It doesn't automatically upgrade CRDs. Otherwise you end up with troubles like #427, #467, and #468. Please refer to the [UPGRADING](charts/actions-runner-controller/docs/UPGRADING.md) docs for the latest process.
+
+This release includes the following changes from contributors. Thank you!
+
+- @{{ .GitHubUser }} fixed {{ .Feature }} to not break when ... (#{{ .PullRequestNumber }})
+- @{{ .GitHubUser }} enhanced {{ .Feature }} to ... (#{{ .PullRequestNumber }})
+- @{{ .GitHubUser }} added {{ .Feature }} for ... (#{{ .PullRequestNumber }})
+- @{{ .GitHubUser }} fixed {{ .Topic }} in the documentation so that ... (#{{ .PullRequestNumber }})
+- @{{ .GitHubUser }} added {{ .Topic }} to the documentation (#{{ .PullRequestNumber }})
+- @{{ .GitHubUser }} improved the documentation about {{ .Topic }} to also cover ... (#{{ .PullRequestNumber }})
+```

.github/stale.yml (new file, 66 lines)
@@ -0,0 +1,66 @@
+# Configuration for probot-stale - https://github.com/probot/stale
+
+# Number of days of inactivity before an Issue or Pull Request becomes stale
+daysUntilStale: 30
+
+# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
+# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
+daysUntilClose: 14
+
+# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
+onlyLabels: []
+
+# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
+exemptLabels:
+  - pinned
+  - security
+  - enhancement
+  - refactor
+  - documentation
+  - chore
+  - needs-investigation
+  - bug
+
+# Set to true to ignore issues in a project (defaults to false)
+exemptProjects: false
+
+# Set to true to ignore issues in a milestone (defaults to false)
+exemptMilestones: false
+
+# Set to true to ignore issues with an assignee (defaults to false)
+exemptAssignees: false
+
+# Label to use when marking as stale
+staleLabel: stale
+
+# Comment to post when marking as stale. Set to `false` to disable
+markComment: >
+  This issue has been automatically marked as stale because it has not had
+  recent activity. It will be closed if no further activity occurs. Thank you
+  for your contributions.
+
+# Comment to post when removing the stale label.
+# unmarkComment: >
+#   Your comment here.
+
+# Comment to post when closing a stale Issue or Pull Request.
+# closeComment: >
+#   Your comment here.
+
+# Limit the number of actions per hour, from 1-30. Default is 30
+limitPerRun: 30
+
+# Limit to only `issues` or `pulls`
+# only: issues
+
+# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
+# pulls:
+#   daysUntilStale: 30
+#   markComment: >
+#     This pull request has been automatically marked as stale because it has not had
+#     recent activity. It will be closed if no further activity occurs. Thank you
+#     for your contributions.
+
+# issues:
+#   exemptLabels:
+#     - confirmed

.github/workflows/build-and-release-runners.yml (new file, 123 lines)
@@ -0,0 +1,123 @@
+name: Build and Release Runners
+
+on:
+  pull_request:
+    branches:
+      - '**'
+    paths:
+      - 'runner/**'
+      - .github/workflows/build-and-release-runners.yml
+  push:
+    branches:
+      - master
+    paths:
+      - runner/patched/*
+      - runner/Dockerfile
+      - runner/Dockerfile.ubuntu.1804
+      - runner/Dockerfile.dindrunner
+      - runner/entrypoint.sh
+      - .github/workflows/build-and-release-runners.yml
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    name: Build ${{ matrix.name }}-ubuntu-${{ matrix.os-version }}
+    strategy:
+      matrix:
+        include:
+          - name: actions-runner
+            os-version: 20.04
+            dockerfile: Dockerfile
+          - name: actions-runner
+            os-version: 18.04
+            dockerfile: Dockerfile.ubuntu.1804
+          - name: actions-runner-dind
+            os-version: 20.04
+            dockerfile: Dockerfile.dindrunner
+    env:
+      RUNNER_VERSION: 2.278.0
+      DOCKER_VERSION: 19.03.12
+      DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
+    steps:
+      - name: Set outputs
+        id: vars
+        run: echo ::set-output name=sha_short::${GITHUB_SHA::7}
+
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+        with:
+          version: latest
+
+      - name: Login to DockerHub
+        uses: docker/login-action@v1
+        if: ${{ github.event_name == 'push' || github.event_name == 'release' }}
+        with:
+          username: ${{ secrets.DOCKER_USER }}
+          password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
+
+      - name: Build and Push Versioned Tags
+        uses: docker/build-push-action@v2
+        with:
+          context: ./runner
+          file: ./runner/${{ matrix.dockerfile }}
+          platforms: linux/amd64,linux/arm64
+          push: ${{ github.event_name != 'pull_request' }}
+          build-args: |
+            RUNNER_VERSION=${{ env.RUNNER_VERSION }}
+            DOCKER_VERSION=${{ env.DOCKER_VERSION }}
+          tags: |
+            ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-ubuntu-${{ matrix.os-version }}
+            ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-ubuntu-${{ matrix.os-version }}-${{ steps.vars.outputs.sha_short }}
+
+  latest-tags:
+    if: ${{ github.event_name == 'push' || github.event_name == 'release' }}
+    runs-on: ubuntu-latest
+    name: Build ${{ matrix.name }}-latest
+    strategy:
+      matrix:
+        include:
+          - name: actions-runner
+            dockerfile: Dockerfile
+          - name: actions-runner-dind
+            dockerfile: Dockerfile.dindrunner
+    env:
+      RUNNER_VERSION: 2.277.1
+      DOCKER_VERSION: 19.03.12
+      DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
+    steps:
+
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+        with:
+          version: latest
+
+      - name: Login to DockerHub
+        uses: docker/login-action@v1
+        with:
+          username: ${{ secrets.DOCKER_USER }}
+          password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
+
+      - name: Build and Push Latest Tag
+        uses: docker/build-push-action@v2
+        with:
+          context: ./runner
+          file: ./runner/${{ matrix.dockerfile }}
+          platforms: linux/amd64,linux/arm64
+          push: true
+          build-args: |
+            RUNNER_VERSION=${{ env.RUNNER_VERSION }}
+            DOCKER_VERSION=${{ env.DOCKER_VERSION }}
+          tags: |
+            ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:latest

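For reference, the tag scheme this workflow pushes can be previewed locally. This is a hedged sketch only: the Docker Hub namespace comes from the DOCKER_USER secret and the short SHA from the "Set outputs" step, so the concrete values below are placeholders rather than anything taken from the workflow itself.

```sh
#!/usr/bin/env bash
# Preview the tags the "build" job would push for one matrix entry.
RUNNER_VERSION=2.278.0
OS_VERSION=20.04
DOCKERHUB_USERNAME=summerwind          # assumption: whatever secrets.DOCKER_USER resolves to
SHA_SHORT=$(git rev-parse --short=7 HEAD)
echo "${DOCKERHUB_USERNAME}/actions-runner:v${RUNNER_VERSION}-ubuntu-${OS_VERSION}"
echo "${DOCKERHUB_USERNAME}/actions-runner:v${RUNNER_VERSION}-ubuntu-${OS_VERSION}-${SHA_SHORT}"
```
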
.github/workflows/build-runner.yml (deleted, 64 lines)
@@ -1,64 +0,0 @@
-on:
-  pull_request:
-    branches:
-      - '**'
-    paths:
-      - 'runner/**'
-      - .github/workflows/build-runner.yml
-  push:
-    branches:
-      - master
-    paths:
-      - runner/patched/*
-      - runner/Dockerfile
-      - runner/dindrunner.Dockerfile
-      - runner/entrypoint.sh
-      - .github/workflows/build-runner.yml
-name: Runner
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    name: Build ${{ matrix.name }}
-    strategy:
-      matrix:
-        include:
-          - name: actions-runner
-            dockerfile: Dockerfile
-          - name: actions-runner-dind
-            dockerfile: dindrunner.Dockerfile
-    env:
-      RUNNER_VERSION: 2.275.1
-      DOCKER_VERSION: 19.03.12
-      DOCKERHUB_USERNAME: ${{ github.repository_owner }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-        with:
-          version: latest
-
-      - name: Login to DockerHub
-        uses: docker/login-action@v1
-        if: ${{ github.event_name == 'push' }}
-        with:
-          username: ${{ github.repository_owner }}
-          password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
-
-      - name: Build [and Push]
-        uses: docker/build-push-action@v2
-        with:
-          context: ./runner
-          file: ./runner/${{ matrix.dockerfile }}
-          platforms: linux/amd64,linux/arm64
-          push: ${{ github.event_name != 'pull_request' }}
-          build-args: |
-            RUNNER_VERSION=${{ env.RUNNER_VERSION }}
-            DOCKER_VERSION=${{ env.DOCKER_VERSION }}
-          tags: |
-            ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}
-            ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:latest

.github/workflows/on-push-lint-charts.yml (4 lines changed)
@@ -4,9 +4,11 @@ on:
   push:
     paths:
       - 'charts/**'
+      - '!charts/actions-runner-controller/docs/**'
+      - '!charts/actions-runner-controller/*.md'
       - '.github/**'
+      - '!.github/*.md'
   workflow_dispatch:
 
 env:
   KUBE_SCORE_VERSION: 1.10.0
   HELM_VERSION: v3.4.1

A second chart-linting workflow (file name not preserved in this view)
@@ -7,7 +7,9 @@ on:
       - main # assume that the branch name may change in future
     paths:
       - 'charts/**'
+      - '!charts/actions-runner-controller/docs/**'
       - '.github/**'
+      - '!**.md'
   workflow_dispatch:
 
 env:

.github/workflows/release.yml (13 lines changed)
@@ -7,8 +7,12 @@ jobs:
     runs-on: ubuntu-latest
     name: Release
     env:
-      DOCKERHUB_USERNAME: ${{ github.repository_owner }}
+      DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
     steps:
+      - name: Set outputs
+        id: vars
+        run: echo ::set-output name=sha_short::${GITHUB_SHA::7}
+
       - name: Checkout
         uses: actions/checkout@v2
 
@@ -43,7 +47,7 @@ jobs:
       - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
-          username: ${{ github.repository_owner }}
+          username: ${{ secrets.DOCKER_USER }}
          password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
 
       - name: Build and Push
@@ -52,5 +56,8 @@ jobs:
          file: Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
-         tags: ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}
+         tags: |
+           ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:latest
+           ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}
+           ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}-${{ steps.vars.outputs.sha_short }}
 

.github/workflows/test.yaml (3 lines changed)
@@ -6,6 +6,9 @@ on:
       - master
     paths-ignore:
       - 'runner/**'
+      - .github/workflows/build-and-release-runners.yml
+      - '*.md'
+      - '.gitignore'
 
 jobs:
   test:

.github/workflows/wip.yml (12 lines changed)
@@ -4,13 +4,15 @@ on:
       - master
     paths-ignore:
       - "runner/**"
+      - "**.md"
+      - ".gitignore"
 
 jobs:
   build:
     runs-on: ubuntu-latest
     name: release-latest
     env:
-      DOCKERHUB_USERNAME: ${{ github.repository_owner }}
+      DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
     steps:
       - name: Checkout
         uses: actions/checkout@v2
@@ -27,14 +29,16 @@ jobs:
       - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
-          username: ${{ github.repository_owner }}
+          username: ${{ secrets.DOCKER_USER }}
          password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
 
+      # Considered unstable builds
+      # See Issue #285, PR #286, and PR #323 for more information
       - name: Build and Push
        uses: docker/build-push-action@v2
        with:
          file: Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
-         tags: ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:latest
+         tags: |
+           ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:canary

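The net effect of this change is that unstable builds from the default branch are published under a moving canary tag rather than latest. A hedged example of consuming it; the Docker Hub namespace depends on the DOCKER_USER secret, so the one shown here is only an assumption:

```sh
# Pull the unstable build published on every push to master (namespace is illustrative).
docker pull summerwind/actions-runner-controller:canary
```
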
.gitignore (3 lines changed)
@@ -26,3 +26,6 @@ bin
 
 .envrc
 *.pem
+
+# OS
+.DS_STORE

CONTRIBUTING.md (new file, 8 lines)
@@ -0,0 +1,8 @@
+# Contributing
+
+### Helm Version Bumps
+
+**Chart Version :** When bumping the chart version follow semantic versioning https://semver.org/<br />
+**App Version :** When bumping the app version you will also need to bump the chart version too. Again, follow semantic versioning when bumping the chart.
+
+To determine if you need to bump the MAJOR, MINOR or PATCH versions you will need to review the changes between the previous app version and the new app version and / or ask for a maintainer to advise.

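As a rough illustration of the guidance above, a chart bump normally edits the chart's Chart.yaml. The directory path matches the chart referenced elsewhere in this diff; the concrete version numbers and the sed approach are illustrative only, not a prescribed procedure:

```sh
# Bump the chart version, and the appVersion too when the controller itself changed.
# Version numbers are placeholders; pick them per semver as described above.
sed -i 's/^version: .*/version: 0.12.0/' charts/actions-runner-controller/Chart.yaml
sed -i 's/^appVersion: .*/appVersion: 0.19.0/' charts/actions-runner-controller/Chart.yaml
```
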
Dockerfile (2 hunks)
@@ -22,7 +22,8 @@ COPY . .
 RUN export GOOS=$(echo ${TARGETPLATFORM} | cut -d / -f1) && \
   export GOARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) && \
   GOARM=$(echo ${TARGETPLATFORM} | cut -d / -f3 | cut -c2-) && \
-  go build -a -o manager main.go
+  go build -a -o manager main.go && \
+  go build -a -o github-webhook-server ./cmd/githubwebhookserver
 
 # Use distroless as minimal base image to package the manager binary
 # Refer to https://github.com/GoogleContainerTools/distroless for more details
@@ -31,6 +32,7 @@ FROM gcr.io/distroless/static:nonroot
 WORKDIR /
 
 COPY --from=builder /workspace/manager .
+COPY --from=builder /workspace/github-webhook-server .
 
 USER nonroot:nonroot
 

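A quick local sanity check for this change is to build the image and confirm both binaries landed in the final distroless stage. A minimal sketch, assuming Docker and a checkout of this repository; the image tag and container name are arbitrary:

```sh
# Build the controller image (the same docker build the Makefile's docker-build target runs).
docker build -t actions-runner-controller:dev .
# Distroless has no shell, so export the filesystem instead of exec-ing into it.
docker create --name arc-inspect actions-runner-controller:dev /manager
docker export arc-inspect | tar -t | grep -E '^(manager|github-webhook-server)$'
docker rm arc-inspect
```
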
Makefile (149 lines changed)
@@ -1,5 +1,17 @@
+ifdef DOCKER_USER
+NAME ?= ${DOCKER_USER}/actions-runner-controller
+else
 NAME ?= summerwind/actions-runner-controller
+endif
+DOCKER_USER ?= $(shell echo ${NAME} | cut -d / -f1)
 VERSION ?= latest
+RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
+RUNNER_TAG ?= ${VERSION}
+TEST_REPO ?= ${DOCKER_USER}/actions-runner-controller
+TEST_ORG ?=
+TEST_ORG_REPO ?=
+SYNC_PERIOD ?= 5m
+
 # From https://github.com/VictoriaMetrics/operator/pull/44
 YAML_DROP=$(YQ) delete --inplace
 YAML_DROP_PREFIX=spec.validation.openAPIV3Schema.properties.spec.properties
@@ -14,6 +26,8 @@ else
 GOBIN=$(shell go env GOBIN)
 endif
+
+TEST_ASSETS=$(PWD)/test-assets
 
 # default list of platforms for which multiarch image is built
 ifeq (${PLATFORMS}, )
 export PLATFORMS="linux/amd64,linux/arm64"
@@ -37,6 +51,13 @@ all: manager
 test: generate fmt vet manifests
 	go test ./... -coverprofile cover.out
+
+test-with-deps: kube-apiserver etcd kubectl
+	# See https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#pkg-constants
+	TEST_ASSET_KUBE_APISERVER=$(KUBE_APISERVER_BIN) \
+	TEST_ASSET_ETCD=$(ETCD_BIN) \
+	TEST_ASSET_KUBECTL=$(KUBECTL_BIN) \
+	make test
 
 # Build manager binary
 manager: generate fmt vet
 	go build -o bin/manager main.go
@@ -96,12 +117,9 @@ generate: controller-gen
 	$(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths="./..."
 
 # Build the docker image
-docker-build: test
+docker-build:
 	docker build . -t ${NAME}:${VERSION}
+	docker build runner -t ${RUNNER_NAME}:${RUNNER_TAG} --build-arg TARGETPLATFORM=$(shell arch)
-# Push the docker image
-docker-push:
-	docker push ${NAME}:${VERSION}
 
 docker-buildx:
 	export DOCKER_CLI_EXPERIMENTAL=enabled
@@ -115,6 +133,11 @@ docker-buildx:
 	-f Dockerfile \
 	. ${PUSH_ARG}
 
+# Push the docker image
+docker-push:
+	docker push ${NAME}:${VERSION}
+	docker push ${RUNNER_NAME}:${RUNNER_TAG}
+
 # Generate the release manifest file
 release: manifests
 	cd config/manager && kustomize edit set image controller=${NAME}:${VERSION}
@@ -126,19 +149,41 @@ release/clean:
 	rm -rf release
 
 .PHONY: acceptance
-acceptance: release/clean docker-build docker-push release
+acceptance: release/clean acceptance/pull docker-build release
-	ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
+	ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/run
-	ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
+	ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/run
-	ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
+	ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/run
-	ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
+	ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/run
+
+acceptance/run: acceptance/kind acceptance/load acceptance/setup acceptance/deploy acceptance/tests acceptance/teardown
+
 acceptance/kind:
-	kind create cluster --name acceptance
+	kind create cluster --name acceptance --config acceptance/kind.yaml
+
+# Set TMPDIR to somewhere under $HOME when you use docker installed with Ubuntu snap
+# Otherwise `load docker-image` fail while running `docker save`.
+# See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap
+acceptance/load:
+	kind load docker-image ${NAME}:${VERSION} --name acceptance
+	kind load docker-image quay.io/brancz/kube-rbac-proxy:v0.10.0 --name acceptance
+	kind load docker-image ${RUNNER_NAME}:${RUNNER_TAG} --name acceptance
+	kind load docker-image docker:dind --name acceptance
+	kind load docker-image quay.io/jetstack/cert-manager-controller:v1.0.4 --name acceptance
+	kind load docker-image quay.io/jetstack/cert-manager-cainjector:v1.0.4 --name acceptance
+	kind load docker-image quay.io/jetstack/cert-manager-webhook:v1.0.4 --name acceptance
 	kubectl cluster-info --context kind-acceptance
+
+# Pull the docker images for acceptance
+acceptance/pull:
+	docker pull quay.io/brancz/kube-rbac-proxy:v0.10.0
+	docker pull docker:dind
+	docker pull quay.io/jetstack/cert-manager-controller:v1.0.4
+	docker pull quay.io/jetstack/cert-manager-cainjector:v1.0.4
+	docker pull quay.io/jetstack/cert-manager-webhook:v1.0.4
 
 acceptance/setup:
 	kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.yaml #kubectl create namespace actions-runner-system
-	kubectl -n cert-manager wait deploy/cert-manager-cainjector --for condition=available --timeout 60s
+	kubectl -n cert-manager wait deploy/cert-manager-cainjector --for condition=available --timeout 90s
 	kubectl -n cert-manager wait deploy/cert-manager-webhook --for condition=available --timeout 60s
 	kubectl -n cert-manager wait deploy/cert-manager --for condition=available --timeout 60s
 	kubectl create namespace actions-runner-system || true
@@ -148,8 +193,12 @@ acceptance/setup:
 acceptance/teardown:
 	kind delete cluster --name acceptance
 
-acceptance/tests:
+acceptance/deploy:
+	NAME=${NAME} DOCKER_USER=${DOCKER_USER} VERSION=${VERSION} RUNNER_NAME=${RUNNER_NAME} RUNNER_TAG=${RUNNER_TAG} TEST_REPO=${TEST_REPO} \
+	TEST_ORG=${TEST_ORG} TEST_ORG_REPO=${TEST_ORG_REPO} SYNC_PERIOD=${SYNC_PERIOD} \
 	acceptance/deploy.sh
+
+acceptance/tests:
 	acceptance/checks.sh
 
 # Upload release file to GitHub.
@@ -191,3 +240,77 @@ ifeq (, $(wildcard $(GOBIN)/yq))
 }
 endif
 YQ=$(GOBIN)/yq
+
+OS_NAME := $(shell uname -s | tr A-Z a-z)
+
+# find or download etcd
+etcd:
+ifeq (, $(shell which etcd))
+ifeq (, $(wildcard $(TEST_ASSETS)/etcd))
+	@{ \
+	set -xe ;\
+	INSTALL_TMP_DIR=$$(mktemp -d) ;\
+	cd $$INSTALL_TMP_DIR ;\
+	wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mkdir -p $(TEST_ASSETS) ;\
+	tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
+	rm -rf $$INSTALL_TMP_DIR ;\
+	}
+ETCD_BIN=$(TEST_ASSETS)/etcd
+else
+ETCD_BIN=$(TEST_ASSETS)/etcd
+endif
+else
+ETCD_BIN=$(shell which etcd)
+endif
+
+# find or download kube-apiserver
+kube-apiserver:
+ifeq (, $(shell which kube-apiserver))
+ifeq (, $(wildcard $(TEST_ASSETS)/kube-apiserver))
+	@{ \
+	set -xe ;\
+	INSTALL_TMP_DIR=$$(mktemp -d) ;\
+	cd $$INSTALL_TMP_DIR ;\
+	wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mkdir -p $(TEST_ASSETS) ;\
+	tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
+	rm -rf $$INSTALL_TMP_DIR ;\
+	}
+KUBE_APISERVER_BIN=$(TEST_ASSETS)/kube-apiserver
+else
+KUBE_APISERVER_BIN=$(TEST_ASSETS)/kube-apiserver
+endif
+else
+KUBE_APISERVER_BIN=$(shell which kube-apiserver)
+endif
+
+# find or download kubectl
+kubectl:
+ifeq (, $(shell which kubectl))
+ifeq (, $(wildcard $(TEST_ASSETS)/kubectl))
+	@{ \
+	set -xe ;\
+	INSTALL_TMP_DIR=$$(mktemp -d) ;\
+	cd $$INSTALL_TMP_DIR ;\
+	wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mkdir -p $(TEST_ASSETS) ;\
+	tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
+	rm -rf $$INSTALL_TMP_DIR ;\
+	}
+KUBECTL_BIN=$(TEST_ASSETS)/kubectl
+else
+KUBECTL_BIN=$(TEST_ASSETS)/kubectl
+endif
+else
+KUBECTL_BIN=$(shell which kubectl)
+endif

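Putting the new targets together, an end-to-end acceptance run now looks roughly like the sketch below. The target names come straight from the Makefile above; every exported value is a placeholder you must supply yourself.

```sh
export DOCKER_USER=yourdockerhubuser   # drives the NAME, RUNNER_NAME and TEST_REPO defaults
export GITHUB_TOKEN=ghp_xxx            # consumed by acceptance/deploy.sh for the "token" secret type
export TEST_REPO=yourorg/yourrepo      # enables the repository RunnerDeployment/HRA test data
make docker-build                      # builds both the controller image and the runner image
make acceptance                        # kind + load + setup + deploy + tests + teardown, per secret type
```
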
acceptance/checks.sh (2 hunks)
@@ -12,6 +12,9 @@ done
 
 echo Found runner ${runner_name}.
+
+# Wait a bit to make sure the runner pod is created before looking for it.
+sleep 2
 
 pod_name=
 
 while [ -z "${pod_name}" ]; do
@@ -24,6 +27,6 @@ echo Found pod ${pod_name}.
 
 echo Waiting for pod ${runner_name} to become ready... 1>&2
 
-kubectl wait pod/${runner_name} --for condition=ready --timeout 180s
+kubectl wait pod/${runner_name} --for condition=ready --timeout 270s
 
 echo All tests passed. 1>&2

acceptance/deploy.sh (2 hunks)
@@ -4,10 +4,14 @@ set -e
 
 tpe=${ACCEPTANCE_TEST_SECRET_TYPE}
+
+VALUES_FILE=${VALUES_FILE:-$(dirname $0)/values.yaml}
 
 if [ "${tpe}" == "token" ]; then
+  if ! kubectl get secret controller-manager -n actions-runner-system >/dev/null; then
   kubectl create secret generic controller-manager \
     -n actions-runner-system \
     --from-literal=github_token=${GITHUB_TOKEN:?GITHUB_TOKEN must not be empty}
+  fi
 elif [ "${tpe}" == "app" ]; then
   kubectl create secret generic controller-manager \
     -n actions-runner-system \
@@ -26,17 +30,37 @@ if [ "${tool}" == "helm" ]; then
     charts/actions-runner-controller \
     -n actions-runner-system \
     --create-namespace \
-    --set syncPeriod=5m
+    --set syncPeriod=${SYNC_PERIOD} \
-  kubectl -n actions-runner-system wait deploy/actions-runner-controller --for condition=available
+    --set authSecret.create=false \
+    --set image.repository=${NAME} \
+    --set image.tag=${VERSION} \
+    -f ${VALUES_FILE}
+  kubectl -n actions-runner-system wait deploy/actions-runner-controller --for condition=available --timeout 60s
 else
   kubectl apply \
     -n actions-runner-system \
     -f release/actions-runner-controller.yaml
-  kubectl -n actions-runner-system wait deploy/controller-manager --for condition=available --timeout 60s
+  kubectl -n actions-runner-system wait deploy/controller-manager --for condition=available --timeout 120s
 fi
 
 # Adhocly wait for some time until actions-runner-controller's admission webhook gets ready
 sleep 20
 
-kubectl apply \
+if [ -n "${TEST_REPO}" ]; then
-  -f acceptance/testdata/runnerdeploy.yaml
+  cat acceptance/testdata/runnerdeploy.yaml | envsubst | kubectl apply -f -
+  cat acceptance/testdata/hra.yaml | envsubst | kubectl apply -f -
+else
+  echo 'Skipped deploying runnerdeployment and hra. Set TEST_REPO to "yourorg/yourrepo" to deploy.'
+fi
+
+if [ -n "${TEST_ORG}" ]; then
+  cat acceptance/testdata/org.runnerdeploy.yaml | envsubst | kubectl apply -f -
+
+  if [ -n "${TEST_ORG_REPO}" ]; then
+    cat acceptance/testdata/org.hra.yaml | envsubst | kubectl apply -f -
+  else
+    echo 'Skipped deploying organizational hra. Set TEST_ORG_REPO to "yourorg/yourrepo" to deploy.'
+  fi
+else
+  echo 'Skipped deploying organizational runnerdeployment. Set TEST_ORG to deploy.'
+fi

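The script can also be run on its own against an existing cluster. A sketch of the Helm path follows; it assumes the script reads the deployment tool from ACCEPTANCE_TEST_DEPLOYMENT_TOOL (the variable the Makefile passes), and every concrete value is a placeholder.

```sh
ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm \
ACCEPTANCE_TEST_SECRET_TYPE=token \
GITHUB_TOKEN=ghp_xxx \
NAME=yourdockerhubuser/actions-runner-controller VERSION=latest \
RUNNER_NAME=yourdockerhubuser/actions-runner RUNNER_TAG=latest \
TEST_REPO=yourorg/yourrepo SYNC_PERIOD=5m \
  acceptance/deploy.sh
```
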
acceptance/kind.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
+apiVersion: kind.x-k8s.io/v1alpha4
+kind: Cluster
+nodes:
+- role: control-plane
+  extraPortMappings:
+  - containerPort: 31000
+    hostPort: 31000
+    listenAddress: "0.0.0.0"
+    protocol: tcp
+#- role: worker

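The single extraPortMappings entry lines up with the NodePort (31000) that acceptance/values.yaml assigns to the GitHub webhook server, so webhook deliveries can reach the cluster through the host. A minimal sketch of using it; the curl path is illustrative rather than a documented endpoint:

```sh
kind create cluster --name acceptance --config acceptance/kind.yaml
# Once the chart is installed with the webhook server enabled, the NodePort is reachable on the host:
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:31000/
```
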
acceptance/pipelines/eks-integration-tests.yaml (new file, 36 lines)
@@ -0,0 +1,36 @@
+name: EKS Integration Tests
+
+on:
+  workflow_dispatch:
+
+env:
+  IRSA_ROLE_ARN:
+  ASSUME_ROLE_ARN:
+  AWS_REGION:
+
+jobs:
+  assume-role-in-runner-test:
+    runs-on: ['self-hosted', 'Linux']
+    steps:
+      - name: Test aws-actions/configure-aws-credentials Action
+        uses: aws-actions/configure-aws-credentials@v1
+        with:
+          aws-region: ${{ env.AWS_REGION }}
+          role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
+          role-duration-seconds: 900
+  assume-role-in-container-test:
+    runs-on: ['self-hosted', 'Linux']
+    container:
+      image: amazon/aws-cli
+      env:
+        AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
+        AWS_ROLE_ARN: ${{ env.IRSA_ROLE_ARN }}
+      volumes:
+        - /var/run/secrets/eks.amazonaws.com/serviceaccount/token:/var/run/secrets/eks.amazonaws.com/serviceaccount/token
+    steps:
+      - name: Test aws-actions/configure-aws-credentials Action in container
+        uses: aws-actions/configure-aws-credentials@v1
+        with:
+          aws-region: ${{ env.AWS_REGION }}
+          role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
+          role-duration-seconds: 900

acceptance/pipelines/runner-integration-tests.yaml (new file, 83 lines)
@@ -0,0 +1,83 @@
+name: Runner Integration Tests
+
+on:
+  workflow_dispatch:
+
+env:
+  ImageOS: ubuntu18 # Used by ruby/setup-ruby action | Update me for the runner OS version you are testing against
+
+jobs:
+  run-step-in-container-test:
+    runs-on: ['self-hosted', 'Linux']
+    container:
+      image: alpine
+    steps:
+      - name: Test we are working in the container
+        run: |
+          if [[ $(sed -n '2p' < /etc/os-release | cut -d "=" -f2) != "alpine" ]]; then
+            echo "::error ::Failed OS detection test, could not match /etc/os-release with alpine. Are we really running in the container?"
+            echo "/etc/os-release below:"
+            cat /etc/os-release
+            exit 1
+          fi
+  setup-python-test:
+    runs-on: ['self-hosted', 'Linux']
+    steps:
+      - name: Print native Python environment
+        run: |
+          which python
+          python --version
+      - uses: actions/setup-python@v2
+        with:
+          python-version: 3.9
+      - name: Test actions/setup-python works
+        run: |
+          VERSION=$(python --version 2>&1 | cut -d ' ' -f2 | cut -d '.' -f1-2)
+          if [[ $VERSION != '3.9' ]]; then
+            echo "Python version detected : $(python --version 2>&1)"
+            echo "::error ::Detected python failed setup version test, could not match version with version specified in the setup action"
+            exit 1
+          else
+            echo "Python version detected : $(python --version 2>&1)"
+          fi
+  setup-node-test:
+    runs-on: ['self-hosted', 'Linux']
+    steps:
+      - uses: actions/setup-node@v2
+        with:
+          node-version: '12'
+      - name: Test actions/setup-node works
+        run: |
+          VERSION=$(node --version | cut -c 2- | cut -d '.' -f1)
+          if [[ $VERSION != '12' ]]; then
+            echo "Node version detected : $(node --version 2>&1)"
+            echo "::error ::Detected node failed setup version test, could not match version with version specified in the setup action"
+            exit 1
+          else
+            echo "Node version detected : $(node --version 2>&1)"
+          fi
+  setup-ruby-test:
+    runs-on: ['self-hosted', 'Linux']
+    steps:
+      - uses: ruby/setup-ruby@v1
+        with:
+          ruby-version: 3.0
+          bundler-cache: true
+      - name: Test ruby/setup-ruby works
+        run: |
+          VERSION=$(ruby --version | cut -d ' ' -f2 | cut -d '.' -f1-2)
+          if [[ $VERSION != '3.0' ]]; then
+            echo "Ruby version detected : $(ruby --version 2>&1)"
+            echo "::error ::Detected ruby failed setup version test, could not match version with version specified in the setup action"
+            exit 1
+          else
+            echo "Ruby version detected : $(ruby --version 2>&1)"
+          fi
+  python-shell-test:
+    runs-on: ['self-hosted', 'Linux']
+    steps:
+      - name: Test Python shell works
+        run: |
+          import os
+          print(os.environ['PATH'])
+        shell: python

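Both pipelines are manual (workflow_dispatch only), so they are intended to be committed to a repository that has the self-hosted runners attached and then triggered by hand. A hedged example with the GitHub CLI, assuming gh is authenticated against that repository:

```sh
gh workflow run "Runner Integration Tests"
gh workflow run "EKS Integration Tests"
# Check the most recent run of one of them:
gh run list --workflow "Runner Integration Tests" --limit 1
```
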
acceptance/testdata/hra.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
+apiVersion: actions.summerwind.dev/v1alpha1
+kind: HorizontalRunnerAutoscaler
+metadata:
+  name: actions-runner-aos-autoscaler
+spec:
+  scaleTargetRef:
+    name: example-runnerdeploy
+  scaleUpTriggers:
+  - githubEvent:
+      checkRun:
+        types: ["created"]
+        status: "queued"
+    amount: 1
+    duration: "1m"
+  minReplicas: 0
+  maxReplicas: 5
+  metrics:
+  - type: PercentageRunnersBusy
+    scaleUpThreshold: '0.75'
+    scaleDownThreshold: '0.3'
+    scaleUpFactor: '2'
+    scaleDownFactor: '0.5'
+  - type: TotalNumberOfQueuedAndInProgressWorkflowRuns
+    repositoryNames:
+    - ${TEST_REPO}

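The ${TEST_REPO} placeholder is not valid to apply as-is; acceptance/deploy.sh pipes the file through envsubst first. Doing the same by hand looks like this, where the repository name is a placeholder:

```sh
export TEST_REPO=yourorg/yourrepo
cat acceptance/testdata/hra.yaml | envsubst | kubectl apply -f -
```
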
acceptance/testdata/org.hra.yaml (new file, 35 lines)
@@ -0,0 +1,35 @@
+apiVersion: actions.summerwind.dev/v1alpha1
+kind: HorizontalRunnerAutoscaler
+metadata:
+  name: org
+spec:
+  scaleTargetRef:
+    name: org-runnerdeploy
+  scaleUpTriggers:
+  - githubEvent:
+      checkRun:
+        types: ["created"]
+        status: "queued"
+    amount: 1
+    duration: "1m"
+  scheduledOverrides:
+  - startTime: "2021-05-11T16:05:00+09:00"
+    endTime: "2021-05-11T16:40:00+09:00"
+    minReplicas: 2
+  - startTime: "2021-05-01T00:00:00+09:00"
+    endTime: "2021-05-03T00:00:00+09:00"
+    recurrenceRule:
+      frequency: Weekly
+      untilTime: "2022-05-01T00:00:00+09:00"
+    minReplicas: 0
+  minReplicas: 0
+  maxReplicas: 5
+  metrics:
+  - type: PercentageRunnersBusy
+    scaleUpThreshold: '0.75'
+    scaleDownThreshold: '0.3'
+    scaleUpFactor: '2'
+    scaleDownFactor: '0.5'
+  - type: TotalNumberOfQueuedAndInProgressWorkflowRuns
+    repositoryNames:
+    - ${TEST_ORG_REPO}

acceptance/testdata/org.runnerdeploy.yaml (new file, 37 lines)
@@ -0,0 +1,37 @@
+apiVersion: actions.summerwind.dev/v1alpha1
+kind: RunnerDeployment
+metadata:
+  name: org-runnerdeploy
+spec:
+  # replicas: 1
+  template:
+    spec:
+      organization: ${TEST_ORG}
+
+      #
+      # Custom runner image
+      #
+      image: ${RUNNER_NAME}:${RUNNER_TAG}
+      imagePullPolicy: IfNotPresent
+
+      #
+      # dockerd within runner container
+      #
+      ## Replace `mumoshu/actions-runner-dind:dev` with your dind image
+      #dockerdWithinRunnerContainer: true
+      #image: mumoshu/actions-runner-dind:dev
+
+      #
+      # Set the MTU used by dockerd-managed network interfaces (including docker-build-ubuntu)
+      #
+      #dockerMTU: 1450
+
+      #Runner group
+      # labels:
+      # - "mylabel 1"
+      # - "mylabel 2"
+
+      #
+      # Non-standard working directory
+      #
+      # workDir: "/"

acceptance/testdata/runnerdeploy.yaml (30 lines changed)
@@ -6,4 +6,32 @@ spec:
   # replicas: 1
   template:
     spec:
-      repository: mumoshu/actions-runner-controller-ci
+      repository: ${TEST_REPO}
+
+      #
+      # Custom runner image
+      #
+      image: ${RUNNER_NAME}:${RUNNER_TAG}
+      imagePullPolicy: IfNotPresent
+
+      #
+      # dockerd within runner container
+      #
+      ## Replace `mumoshu/actions-runner-dind:dev` with your dind image
+      #dockerdWithinRunnerContainer: true
+      #image: mumoshu/actions-runner-dind:dev
+
+      #
+      # Set the MTU used by dockerd-managed network interfaces (including docker-build-ubuntu)
+      #
+      #dockerMTU: 1450
+
+      #Runner group
+      # labels:
+      # - "mylabel 1"
+      # - "mylabel 2"
+
+      #
+      # Non-standard working directory
+      #
+      # workDir: "/"

acceptance/values.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
+# Set actions-runner-controller settings for testing
+githubAPICacheDuration: 10s
+githubWebhookServer:
+  enabled: true
+  labels: {}
+  replicaCount: 1
+  syncPeriod: 10m
+  secret:
+    create: true
+    name: "github-webhook-server"
+    ### GitHub Webhook Configuration
+    #github_webhook_secret_token: ""
+  service:
+    type: NodePort
+    ports:
+      - port: 80
+        targetPort: http
+        protocol: TCP
+        name: http
+        nodePort: 31000

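These values are picked up by acceptance/deploy.sh through VALUES_FILE, which defaults to the values.yaml sitting next to the script, so pointing the Helm-based deployment at a different file is just an environment override. The paths and token below are placeholders:

```sh
VALUES_FILE=./my-values.yaml \
ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token GITHUB_TOKEN=ghp_xxx \
  acceptance/deploy.sh
```
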
@@ -41,6 +41,68 @@ type HorizontalRunnerAutoscalerSpec struct {
|
|||||||
// Metrics is the collection of various metric targets to calculate desired number of runners
|
// Metrics is the collection of various metric targets to calculate desired number of runners
|
||||||
// +optional
|
// +optional
|
||||||
Metrics []MetricSpec `json:"metrics,omitempty"`
|
Metrics []MetricSpec `json:"metrics,omitempty"`
|
||||||
|
|
||||||
|
// ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
|
||||||
|
// on each webhook requested received by the webhookBasedAutoscaler.
|
||||||
|
//
|
||||||
|
// This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
|
||||||
|
//
|
||||||
|
// Note that the added runners remain until the next sync period at least,
|
||||||
|
// and they may or may not be used by GitHub Actions depending on the timing.
|
||||||
|
// They are intended to be used to gain "resource slack" immediately after you
|
||||||
|
// receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
|
||||||
|
ScaleUpTriggers []ScaleUpTrigger `json:"scaleUpTriggers,omitempty"`
|
||||||
|
|
||||||
|
CapacityReservations []CapacityReservation `json:"capacityReservations,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
|
||||||
|
|
||||||
|
// ScheduledOverrides is the list of ScheduledOverride.
|
||||||
|
// It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
|
||||||
|
// The earlier a scheduled override is, the higher it is prioritized.
|
||||||
|
// +optional
|
||||||
|
ScheduledOverrides []ScheduledOverride `json:"scheduledOverrides,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ScaleUpTrigger struct {
|
||||||
|
GitHubEvent *GitHubEventScaleUpTriggerSpec `json:"githubEvent,omitempty"`
|
||||||
|
Amount int `json:"amount,omitempty"`
|
||||||
|
Duration metav1.Duration `json:"duration,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type GitHubEventScaleUpTriggerSpec struct {
|
||||||
|
CheckRun *CheckRunSpec `json:"checkRun,omitempty"`
|
||||||
|
PullRequest *PullRequestSpec `json:"pullRequest,omitempty"`
|
||||||
|
Push *PushSpec `json:"push,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
|
||||||
|
type CheckRunSpec struct {
|
||||||
|
Types []string `json:"types,omitempty"`
|
||||||
|
Status string `json:"status,omitempty"`
|
||||||
|
|
||||||
|
// Names is a list of GitHub Actions glob patterns.
|
||||||
|
// Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
|
||||||
|
// Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
|
||||||
|
// So it is very likely that you can utilize this to trigger depending on the job.
|
||||||
|
Names []string `json:"names,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
|
||||||
|
type PullRequestSpec struct {
|
||||||
|
Types []string `json:"types,omitempty"`
|
||||||
|
Branches []string `json:"branches,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// PushSpec is the condition for triggering scale-up on push event
|
||||||
|
// Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
|
||||||
|
type PushSpec struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
// CapacityReservation specifies the number of replicas temporarily added
|
||||||
|
// to the scale target until ExpirationTime.
|
||||||
|
type CapacityReservation struct {
|
||||||
|
Name string `json:"name,omitempty"`
|
||||||
|
ExpirationTime metav1.Time `json:"expirationTime,omitempty"`
|
||||||
|
Replicas int `json:"replicas,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type ScaleTargetRef struct {
|
type ScaleTargetRef struct {
|
||||||
@@ -76,6 +138,50 @@ type MetricSpec struct {
     // to determine how many pods should be removed.
     // +optional
     ScaleDownFactor string `json:"scaleDownFactor,omitempty"`
+
+    // ScaleUpAdjustment is the number of runners added on scale-up.
+    // You can only specify either ScaleUpFactor or ScaleUpAdjustment.
+    // +optional
+    ScaleUpAdjustment int `json:"scaleUpAdjustment,omitempty"`
+
+    // ScaleDownAdjustment is the number of runners removed on scale-down.
+    // You can only specify either ScaleDownFactor or ScaleDownAdjustment.
+    // +optional
+    ScaleDownAdjustment int `json:"scaleDownAdjustment,omitempty"`
+}
+
+// ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
+// A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
+type ScheduledOverride struct {
+    // StartTime is the time at which the first override starts.
+    StartTime metav1.Time `json:"startTime"`
+
+    // EndTime is the time at which the first override ends.
+    EndTime metav1.Time `json:"endTime"`
+
+    // MinReplicas is the number of runners while overriding.
+    // If omitted, it doesn't override minReplicas.
+    // +optional
+    // +nullable
+    // +kubebuilder:validation:Minimum=0
+    MinReplicas *int `json:"minReplicas,omitempty"`
+
+    // +optional
+    RecurrenceRule RecurrenceRule `json:"recurrenceRule,omitempty"`
+}
+
+type RecurrenceRule struct {
+    // Frequency is the name of a predefined interval of each recurrence.
+    // The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
+    // If empty, the corresponding override happens only once.
+    // +optional
+    // +kubebuilder:validation:Enum=Daily;Weekly;Monthly;Yearly
+    Frequency string `json:"frequency,omitempty"`
+
+    // UntilTime is the time of the final recurrence.
+    // If empty, the schedule recurs forever.
+    // +optional
+    UntilTime metav1.Time `json:"untilTime,omitempty"`
 }

 type HorizontalRunnerAutoscalerStatus struct {
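As a sketch of how ScheduledOverride and RecurrenceRule serialize inside a HorizontalRunnerAutoscaler spec, the fragment below defines a weekend override that repeats weekly; the timestamps are arbitrary examples and the rest of the spec is omitted.

  scheduledOverrides:
  - startTime: "2021-05-01T00:00:00+09:00"
    endTime: "2021-05-03T00:00:00+09:00"
    minReplicas: 0
    recurrenceRule:
      frequency: Weekly
      untilTime: "2022-05-01T00:00:00+09:00"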
@@ -90,7 +196,24 @@ type HorizontalRunnerAutoscalerStatus struct {
     DesiredReplicas *int `json:"desiredReplicas,omitempty"`

     // +optional
+    // +nullable
     LastSuccessfulScaleOutTime *metav1.Time `json:"lastSuccessfulScaleOutTime,omitempty"`
+
+    // +optional
+    CacheEntries []CacheEntry `json:"cacheEntries,omitempty"`
+
+    // ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
+    // for observability.
+    // +optional
+    ScheduledOverridesSummary *string `json:"scheduledOverridesSummary,omitempty"`
+}
+
+const CacheEntryKeyDesiredReplicas = "desiredReplicas"
+
+type CacheEntry struct {
+    Key            string      `json:"key,omitempty"`
+    Value          int         `json:"value,omitempty"`
+    ExpirationTime metav1.Time `json:"expirationTime,omitempty"`
 }

 // +kubebuilder:object:root=true
@@ -98,6 +221,7 @@ type HorizontalRunnerAutoscalerStatus struct {
 // +kubebuilder:printcolumn:JSONPath=".spec.minReplicas",name=Min,type=number
 // +kubebuilder:printcolumn:JSONPath=".spec.maxReplicas",name=Max,type=number
 // +kubebuilder:printcolumn:JSONPath=".status.desiredReplicas",name=Desired,type=number
+// +kubebuilder:printcolumn:JSONPath=".status.scheduledOverridesSummary",name=Schedule,type=string

 // HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
 type HorizontalRunnerAutoscaler struct {
@@ -19,12 +19,18 @@ package v1alpha1
 import (
     "errors"

+    "k8s.io/apimachinery/pkg/api/resource"
+
     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 // RunnerSpec defines the desired state of Runner
 type RunnerSpec struct {
+    // +optional
+    // +kubebuilder:validation:Pattern=`^[^/]+$`
+    Enterprise string `json:"enterprise,omitempty"`
+
     // +optional
     // +kubebuilder:validation:Pattern=`^[^/]+$`
     Organization string `json:"organization,omitempty"`
@@ -39,11 +45,16 @@ type RunnerSpec struct {
     // +optional
     Group string `json:"group,omitempty"`

+    // +optional
+    Ephemeral *bool `json:"ephemeral,omitempty"`
+
     // +optional
     Containers []corev1.Container `json:"containers,omitempty"`
     // +optional
     DockerdContainerResources corev1.ResourceRequirements `json:"dockerdContainerResources,omitempty"`
     // +optional
+    DockerVolumeMounts []corev1.VolumeMount `json:"dockerVolumeMounts,omitempty"`
+    // +optional
     Resources corev1.ResourceRequirements `json:"resources,omitempty"`
     // +optional
     VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
@@ -88,16 +99,39 @@ type RunnerSpec struct {
     DockerdWithinRunnerContainer *bool `json:"dockerdWithinRunnerContainer,omitempty"`
     // +optional
     DockerEnabled *bool `json:"dockerEnabled,omitempty"`
+    // +optional
+    DockerMTU *int64 `json:"dockerMTU,omitempty"`
+    // +optional
+    DockerRegistryMirror *string `json:"dockerRegistryMirror,omitempty"`
+    // +optional
+    HostAliases []corev1.HostAlias `json:"hostAliases,omitempty"`
+    // +optional
+    VolumeSizeLimit *resource.Quantity `json:"volumeSizeLimit,omitempty"`
+
+    // RuntimeClassName is the container runtime configuration that containers should run under.
+    // More info: https://kubernetes.io/docs/concepts/containers/runtime-class
+    // +optional
+    RuntimeClassName *string `json:"runtimeClassName,omitempty"`
 }

 // ValidateRepository validates repository field.
 func (rs *RunnerSpec) ValidateRepository() error {
-    // Organization and repository are both exclusive.
-    if len(rs.Organization) == 0 && len(rs.Repository) == 0 {
-        return errors.New("Spec needs organization or repository")
+    // Enterprise, Organization and Repository are mutually exclusive.
+    foundCount := 0
+    if len(rs.Organization) > 0 {
+        foundCount += 1
     }
-    if len(rs.Organization) > 0 && len(rs.Repository) > 0 {
-        return errors.New("Spec cannot have both organization and repository")
+    if len(rs.Repository) > 0 {
+        foundCount += 1
+    }
+    if len(rs.Enterprise) > 0 {
+        foundCount += 1
+    }
+    if foundCount == 0 {
+        return errors.New("Spec needs enterprise, organization or repository")
+    }
+    if foundCount > 1 {
+        return errors.New("Spec cannot have many fields defined enterprise, organization and repository")
     }

     return nil
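With the Enterprise field added, exactly one of enterprise, organization, or repository is expected on a Runner spec, as the rewritten ValidateRepository above enforces. A hedged sketch of a Runner manifest that combines this with a few of the new RunnerSpec fields follows; the enterprise slug, MTU value, registry mirror, and RuntimeClass name are placeholders, not values taken from this diff.

apiVersion: actions.summerwind.dev/v1alpha1
kind: Runner
metadata:
  name: example-enterprise-runner    # hypothetical name
spec:
  enterprise: example-enterprise     # exactly one of enterprise / organization / repository
  ephemeral: true
  dockerMTU: 1400
  dockerRegistryMirror: https://mirror.example.com
  runtimeClassName: runc             # assumes a RuntimeClass named "runc" exists in the cluster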
@@ -105,14 +139,22 @@ func (rs *RunnerSpec) ValidateRepository() error {

 // RunnerStatus defines the observed state of Runner
 type RunnerStatus struct {
+    // +optional
     Registration RunnerStatusRegistration `json:"registration"`
-    Phase string `json:"phase"`
-    Reason string `json:"reason"`
-    Message string `json:"message"`
+    // +optional
+    Phase string `json:"phase,omitempty"`
+    // +optional
+    Reason string `json:"reason,omitempty"`
+    // +optional
+    Message string `json:"message,omitempty"`
+    // +optional
+    // +nullable
+    LastRegistrationCheckTime *metav1.Time `json:"lastRegistrationCheckTime,omitempty"`
 }

 // RunnerStatusRegistration contains runner registration status
 type RunnerStatusRegistration struct {
+    Enterprise   string   `json:"enterprise,omitempty"`
     Organization string   `json:"organization,omitempty"`
     Repository   string   `json:"repository,omitempty"`
     Labels       []string `json:"labels,omitempty"`
@@ -122,10 +164,12 @@ type RunnerStatusRegistration struct {

 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:JSONPath=".spec.enterprise",name=Enterprise,type=string
 // +kubebuilder:printcolumn:JSONPath=".spec.organization",name=Organization,type=string
 // +kubebuilder:printcolumn:JSONPath=".spec.repository",name=Repository,type=string
 // +kubebuilder:printcolumn:JSONPath=".spec.labels",name=Labels,type=string
 // +kubebuilder:printcolumn:JSONPath=".status.phase",name=Status,type=string
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

 // Runner is the Schema for the runners API
 type Runner struct {
@@ -34,7 +34,7 @@ func (r *Runner) SetupWebhookWithManager(mgr ctrl.Manager) error {
         Complete()
 }

-// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=mutate.runner.actions.summerwind.dev
+// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=mutate.runner.actions.summerwind.dev,sideEffects=None

 var _ webhook.Defaulter = &Runner{}

@@ -43,7 +43,7 @@ func (r *Runner) Default() {
     // Nothing to do.
 }

-// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=validate.runner.actions.summerwind.dev
+// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=validate.runner.actions.summerwind.dev,sideEffects=None

 var _ webhook.Validator = &Runner{}
@@ -25,30 +25,54 @@ const (
     AutoscalingMetricTypePercentageRunnersBusy = "PercentageRunnersBusy"
 )

-// RunnerReplicaSetSpec defines the desired state of RunnerDeployment
+// RunnerDeploymentSpec defines the desired state of RunnerDeployment
 type RunnerDeploymentSpec struct {
     // +optional
     // +nullable
     Replicas *int `json:"replicas,omitempty"`

+    // +optional
+    // +nullable
+    Selector *metav1.LabelSelector `json:"selector"`
     Template RunnerTemplate `json:"template"`
 }

 type RunnerDeploymentStatus struct {
-    AvailableReplicas int `json:"availableReplicas"`
-    ReadyReplicas int `json:"readyReplicas"`
-
-    // Replicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
-    // This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
-    // +optional
-    Replicas *int `json:"desiredReplicas,omitempty"`
+    // See K8s deployment controller code for reference
+    // https://github.com/kubernetes/kubernetes/blob/ea0764452222146c47ec826977f49d7001b0ea8c/pkg/controller/deployment/sync.go#L487-L505
+
+    // AvailableReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
+    // This corresponds to the sum of status.availableReplicas of all the runner replica sets.
+    // +optional
+    AvailableReplicas *int `json:"availableReplicas"`
+
+    // ReadyReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
+    // This corresponds to the sum of status.readyReplicas of all the runner replica sets.
+    // +optional
+    ReadyReplicas *int `json:"readyReplicas"`
+
+    // UpdatedReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
+    // This corresponds to status.replicas of the runner replica set that has the desired template hash.
+    // +optional
+    UpdatedReplicas *int `json:"updatedReplicas"`
+
+    // DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
+    // This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
+    // +optional
+    DesiredReplicas *int `json:"desiredReplicas"`
+
+    // Replicas is the total number of replicas
+    // +optional
+    Replicas *int `json:"replicas"`
 }

 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
-// +kubebuilder:printcolumn:JSONPath=".status.availableReplicas",name=Current,type=number
-// +kubebuilder:printcolumn:JSONPath=".status.readyReplicas",name=Ready,type=number
+// +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number
+// +kubebuilder:printcolumn:JSONPath=".status.updatedReplicas",name=Up-To-Date,type=number
+// +kubebuilder:printcolumn:JSONPath=".status.availableReplicas",name=Available,type=number
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

 // RunnerDeployment is the Schema for the runnerdeployments API
 type RunnerDeployment struct {
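The reworked RunnerDeploymentStatus mirrors the upstream Deployment status shape (replicas, updatedReplicas, availableReplicas, readyReplicas and desiredReplicas as optional pointers). A minimal RunnerDeployment that would populate it might look like the sketch below; the template layout (a spec block under template) is assumed from the RunnerTemplate type, and the organization is a placeholder.

apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runnerdeploy         # hypothetical name
spec:
  replicas: 2
  template:
    spec:                            # assumed RunnerTemplate shape
      organization: example-org      # placeholder organization
      ephemeral: true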
@@ -26,12 +26,26 @@ type RunnerReplicaSetSpec struct {
     // +nullable
     Replicas *int `json:"replicas,omitempty"`

+    // +optional
+    // +nullable
+    Selector *metav1.LabelSelector `json:"selector"`
     Template RunnerTemplate `json:"template"`
 }

 type RunnerReplicaSetStatus struct {
-    AvailableReplicas int `json:"availableReplicas"`
-    ReadyReplicas int `json:"readyReplicas"`
+    // See K8s replicaset controller code for reference
+    // https://github.com/kubernetes/kubernetes/blob/ea0764452222146c47ec826977f49d7001b0ea8c/pkg/controller/replicaset/replica_set_utils.go#L101-L106
+
+    // Replicas is the number of runners that are created and still being managed by this runner replica set.
+    // +optional
+    Replicas *int `json:"replicas"`
+
+    // ReadyReplicas is the number of runners that are created and Running.
+    ReadyReplicas *int `json:"readyReplicas"`
+
+    // AvailableReplicas is the number of runners that are created and Running.
+    // This is currently the same as ReadyReplicas but preserved for future use.
+    AvailableReplicas *int `json:"availableReplicas"`
 }

 type RunnerTemplate struct {
@@ -43,8 +57,9 @@ type RunnerTemplate struct {
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
-// +kubebuilder:printcolumn:JSONPath=".status.availableReplicas",name=Current,type=number
+// +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number
 // +kubebuilder:printcolumn:JSONPath=".status.readyReplicas",name=Ready,type=number
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

 // RunnerReplicaSet is the Schema for the runnerreplicasets API
 type RunnerReplicaSet struct {
@@ -34,7 +34,7 @@ func (r *RunnerReplicaSet) SetupWebhookWithManager(mgr ctrl.Manager) error {
         Complete()
 }

-// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=mutate.runnerreplicaset.actions.summerwind.dev
+// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=mutate.runnerreplicaset.actions.summerwind.dev,sideEffects=None

 var _ webhook.Defaulter = &RunnerReplicaSet{}

@@ -43,7 +43,7 @@ func (r *RunnerReplicaSet) Default() {
     // Nothing to do.
 }

-// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=validate.runnerreplicaset.actions.summerwind.dev
+// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=validate.runnerreplicaset.actions.summerwind.dev,sideEffects=None

 var _ webhook.Validator = &RunnerReplicaSet{}
@@ -22,9 +22,97 @@ package v1alpha1

 import (
     "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
 )

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CacheEntry) DeepCopyInto(out *CacheEntry) {
+    *out = *in
+    in.ExpirationTime.DeepCopyInto(&out.ExpirationTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheEntry.
+func (in *CacheEntry) DeepCopy() *CacheEntry {
+    if in == nil {
+        return nil
+    }
+    out := new(CacheEntry)
+    in.DeepCopyInto(out)
+    return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CapacityReservation) DeepCopyInto(out *CapacityReservation) {
+    *out = *in
+    in.ExpirationTime.DeepCopyInto(&out.ExpirationTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservation.
+func (in *CapacityReservation) DeepCopy() *CapacityReservation {
+    if in == nil {
+        return nil
+    }
+    out := new(CapacityReservation)
+    in.DeepCopyInto(out)
+    return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CheckRunSpec) DeepCopyInto(out *CheckRunSpec) {
+    *out = *in
+    if in.Types != nil {
+        in, out := &in.Types, &out.Types
+        *out = make([]string, len(*in))
+        copy(*out, *in)
+    }
+    if in.Names != nil {
+        in, out := &in.Names, &out.Names
+        *out = make([]string, len(*in))
+        copy(*out, *in)
+    }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckRunSpec.
+func (in *CheckRunSpec) DeepCopy() *CheckRunSpec {
+    if in == nil {
+        return nil
+    }
+    out := new(CheckRunSpec)
+    in.DeepCopyInto(out)
+    return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitHubEventScaleUpTriggerSpec) DeepCopyInto(out *GitHubEventScaleUpTriggerSpec) {
+    *out = *in
+    if in.CheckRun != nil {
+        in, out := &in.CheckRun, &out.CheckRun
+        *out = new(CheckRunSpec)
+        (*in).DeepCopyInto(*out)
+    }
+    if in.PullRequest != nil {
+        in, out := &in.PullRequest, &out.PullRequest
+        *out = new(PullRequestSpec)
+        (*in).DeepCopyInto(*out)
+    }
+    if in.Push != nil {
+        in, out := &in.Push, &out.Push
+        *out = new(PushSpec)
+        **out = **in
+    }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubEventScaleUpTriggerSpec.
+func (in *GitHubEventScaleUpTriggerSpec) DeepCopy() *GitHubEventScaleUpTriggerSpec {
+    if in == nil {
+        return nil
+    }
+    out := new(GitHubEventScaleUpTriggerSpec)
+    in.DeepCopyInto(out)
+    return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *HorizontalRunnerAutoscaler) DeepCopyInto(out *HorizontalRunnerAutoscaler) {
     *out = *in
@@ -110,6 +198,27 @@ func (in *HorizontalRunnerAutoscalerSpec) DeepCopyInto(out *HorizontalRunnerAuto
             (*in)[i].DeepCopyInto(&(*out)[i])
         }
     }
+    if in.ScaleUpTriggers != nil {
+        in, out := &in.ScaleUpTriggers, &out.ScaleUpTriggers
+        *out = make([]ScaleUpTrigger, len(*in))
+        for i := range *in {
+            (*in)[i].DeepCopyInto(&(*out)[i])
+        }
+    }
+    if in.CapacityReservations != nil {
+        in, out := &in.CapacityReservations, &out.CapacityReservations
+        *out = make([]CapacityReservation, len(*in))
+        for i := range *in {
+            (*in)[i].DeepCopyInto(&(*out)[i])
+        }
+    }
+    if in.ScheduledOverrides != nil {
+        in, out := &in.ScheduledOverrides, &out.ScheduledOverrides
+        *out = make([]ScheduledOverride, len(*in))
+        for i := range *in {
+            (*in)[i].DeepCopyInto(&(*out)[i])
+        }
+    }
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerSpec.
@@ -134,6 +243,18 @@ func (in *HorizontalRunnerAutoscalerStatus) DeepCopyInto(out *HorizontalRunnerAu
         in, out := &in.LastSuccessfulScaleOutTime, &out.LastSuccessfulScaleOutTime
         *out = (*in).DeepCopy()
     }
+    if in.CacheEntries != nil {
+        in, out := &in.CacheEntries, &out.CacheEntries
+        *out = make([]CacheEntry, len(*in))
+        for i := range *in {
+            (*in)[i].DeepCopyInto(&(*out)[i])
+        }
+    }
+    if in.ScheduledOverridesSummary != nil {
+        in, out := &in.ScheduledOverridesSummary, &out.ScheduledOverridesSummary
+        *out = new(string)
+        **out = **in
+    }
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerStatus.
@@ -166,6 +287,62 @@ func (in *MetricSpec) DeepCopy() *MetricSpec {
     return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PullRequestSpec) DeepCopyInto(out *PullRequestSpec) {
+    *out = *in
+    if in.Types != nil {
+        in, out := &in.Types, &out.Types
+        *out = make([]string, len(*in))
+        copy(*out, *in)
+    }
+    if in.Branches != nil {
+        in, out := &in.Branches, &out.Branches
+        *out = make([]string, len(*in))
+        copy(*out, *in)
+    }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PullRequestSpec.
+func (in *PullRequestSpec) DeepCopy() *PullRequestSpec {
+    if in == nil {
+        return nil
+    }
+    out := new(PullRequestSpec)
+    in.DeepCopyInto(out)
+    return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PushSpec) DeepCopyInto(out *PushSpec) {
+    *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PushSpec.
+func (in *PushSpec) DeepCopy() *PushSpec {
+    if in == nil {
+        return nil
+    }
+    out := new(PushSpec)
+    in.DeepCopyInto(out)
+    return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RecurrenceRule) DeepCopyInto(out *RecurrenceRule) {
+    *out = *in
+    in.UntilTime.DeepCopyInto(&out.UntilTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceRule.
+func (in *RecurrenceRule) DeepCopy() *RecurrenceRule {
+    if in == nil {
+        return nil
+    }
+    out := new(RecurrenceRule)
+    in.DeepCopyInto(out)
+    return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Runner) DeepCopyInto(out *Runner) {
     *out = *in
@@ -260,6 +437,11 @@ func (in *RunnerDeploymentSpec) DeepCopyInto(out *RunnerDeploymentSpec) {
         *out = new(int)
         **out = **in
     }
+    if in.Selector != nil {
+        in, out := &in.Selector, &out.Selector
+        *out = new(metav1.LabelSelector)
+        (*in).DeepCopyInto(*out)
+    }
     in.Template.DeepCopyInto(&out.Template)
 }

@@ -276,6 +458,26 @@ func (in *RunnerDeploymentSpec) DeepCopy() *RunnerDeploymentSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *RunnerDeploymentStatus) DeepCopyInto(out *RunnerDeploymentStatus) {
     *out = *in
+    if in.AvailableReplicas != nil {
+        in, out := &in.AvailableReplicas, &out.AvailableReplicas
+        *out = new(int)
+        **out = **in
+    }
+    if in.ReadyReplicas != nil {
+        in, out := &in.ReadyReplicas, &out.ReadyReplicas
+        *out = new(int)
+        **out = **in
+    }
+    if in.UpdatedReplicas != nil {
+        in, out := &in.UpdatedReplicas, &out.UpdatedReplicas
+        *out = new(int)
+        **out = **in
+    }
+    if in.DesiredReplicas != nil {
+        in, out := &in.DesiredReplicas, &out.DesiredReplicas
+        *out = new(int)
+        **out = **in
+    }
     if in.Replicas != nil {
         in, out := &in.Replicas, &out.Replicas
         *out = new(int)
@@ -331,7 +533,7 @@ func (in *RunnerReplicaSet) DeepCopyInto(out *RunnerReplicaSet) {
     out.TypeMeta = in.TypeMeta
     in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
     in.Spec.DeepCopyInto(&out.Spec)
-    out.Status = in.Status
+    in.Status.DeepCopyInto(&out.Status)
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerReplicaSet.
@@ -392,6 +594,11 @@ func (in *RunnerReplicaSetSpec) DeepCopyInto(out *RunnerReplicaSetSpec) {
         *out = new(int)
         **out = **in
     }
+    if in.Selector != nil {
+        in, out := &in.Selector, &out.Selector
+        *out = new(metav1.LabelSelector)
+        (*in).DeepCopyInto(*out)
+    }
     in.Template.DeepCopyInto(&out.Template)
 }

@@ -408,6 +615,21 @@ func (in *RunnerReplicaSetSpec) DeepCopy() *RunnerReplicaSetSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *RunnerReplicaSetStatus) DeepCopyInto(out *RunnerReplicaSetStatus) {
     *out = *in
+    if in.Replicas != nil {
+        in, out := &in.Replicas, &out.Replicas
+        *out = new(int)
+        **out = **in
+    }
+    if in.ReadyReplicas != nil {
+        in, out := &in.ReadyReplicas, &out.ReadyReplicas
+        *out = new(int)
+        **out = **in
+    }
+    if in.AvailableReplicas != nil {
+        in, out := &in.AvailableReplicas, &out.AvailableReplicas
+        *out = new(int)
+        **out = **in
+    }
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerReplicaSetStatus.
@@ -428,6 +650,11 @@ func (in *RunnerSpec) DeepCopyInto(out *RunnerSpec) {
         *out = make([]string, len(*in))
         copy(*out, *in)
     }
+    if in.Ephemeral != nil {
+        in, out := &in.Ephemeral, &out.Ephemeral
+        *out = new(bool)
+        **out = **in
+    }
     if in.Containers != nil {
         in, out := &in.Containers, &out.Containers
         *out = make([]v1.Container, len(*in))
@@ -436,6 +663,13 @@ func (in *RunnerSpec) DeepCopyInto(out *RunnerSpec) {
         }
     }
     in.DockerdContainerResources.DeepCopyInto(&out.DockerdContainerResources)
+    if in.DockerVolumeMounts != nil {
+        in, out := &in.DockerVolumeMounts, &out.DockerVolumeMounts
+        *out = make([]v1.VolumeMount, len(*in))
+        for i := range *in {
+            (*in)[i].DeepCopyInto(&(*out)[i])
+        }
+    }
     in.Resources.DeepCopyInto(&out.Resources)
     if in.VolumeMounts != nil {
         in, out := &in.VolumeMounts, &out.VolumeMounts
@@ -535,6 +769,33 @@ func (in *RunnerSpec) DeepCopyInto(out *RunnerSpec) {
         *out = new(bool)
         **out = **in
     }
+    if in.DockerMTU != nil {
+        in, out := &in.DockerMTU, &out.DockerMTU
+        *out = new(int64)
+        **out = **in
+    }
+    if in.DockerRegistryMirror != nil {
+        in, out := &in.DockerRegistryMirror, &out.DockerRegistryMirror
+        *out = new(string)
+        **out = **in
+    }
+    if in.HostAliases != nil {
+        in, out := &in.HostAliases, &out.HostAliases
+        *out = make([]v1.HostAlias, len(*in))
+        for i := range *in {
+            (*in)[i].DeepCopyInto(&(*out)[i])
+        }
+    }
+    if in.VolumeSizeLimit != nil {
+        in, out := &in.VolumeSizeLimit, &out.VolumeSizeLimit
+        x := (*in).DeepCopy()
+        *out = &x
+    }
+    if in.RuntimeClassName != nil {
+        in, out := &in.RuntimeClassName, &out.RuntimeClassName
+        *out = new(string)
+        **out = **in
+    }
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSpec.
@@ -551,6 +812,10 @@ func (in *RunnerSpec) DeepCopy() *RunnerSpec {
 func (in *RunnerStatus) DeepCopyInto(out *RunnerStatus) {
     *out = *in
     in.Registration.DeepCopyInto(&out.Registration)
+    if in.LastRegistrationCheckTime != nil {
+        in, out := &in.LastRegistrationCheckTime, &out.LastRegistrationCheckTime
+        *out = (*in).DeepCopy()
+    }
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerStatus.
@@ -615,3 +880,47 @@ func (in *ScaleTargetRef) DeepCopy() *ScaleTargetRef {
     in.DeepCopyInto(out)
     return out
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleUpTrigger) DeepCopyInto(out *ScaleUpTrigger) {
+    *out = *in
+    if in.GitHubEvent != nil {
+        in, out := &in.GitHubEvent, &out.GitHubEvent
+        *out = new(GitHubEventScaleUpTriggerSpec)
+        (*in).DeepCopyInto(*out)
+    }
+    out.Duration = in.Duration
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleUpTrigger.
+func (in *ScaleUpTrigger) DeepCopy() *ScaleUpTrigger {
+    if in == nil {
+        return nil
+    }
+    out := new(ScaleUpTrigger)
+    in.DeepCopyInto(out)
+    return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScheduledOverride) DeepCopyInto(out *ScheduledOverride) {
+    *out = *in
+    in.StartTime.DeepCopyInto(&out.StartTime)
+    in.EndTime.DeepCopyInto(&out.EndTime)
+    if in.MinReplicas != nil {
+        in, out := &in.MinReplicas, &out.MinReplicas
+        *out = new(int)
+        **out = **in
+    }
+    in.RecurrenceRule.DeepCopyInto(&out.RecurrenceRule)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledOverride.
+func (in *ScheduledOverride) DeepCopy() *ScheduledOverride {
+    if in == nil {
+        return nil
+    }
+    out := new(ScheduledOverride)
+    in.DeepCopyInto(out)
+    return out
+}
@@ -21,3 +21,5 @@
 .idea/
 *.tmproj
 .vscode/
+# Docs
+docs/
@@ -15,22 +15,16 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.2.0
+version: 0.12.1

-# This is the version number of the application being deployed. This version number should be
-# incremented each time you make changes to the application. Versions are not expected to
-# follow Semantic Versioning. They should reflect the version the application is using.
-appVersion: 0.16.0
+# Used as the default manager tag value when no tag property is provided in the values.yaml
+appVersion: 0.19.0

-home: https://github.com/summerwind/actions-runner-controller
+home: https://github.com/actions-runner-controller/actions-runner-controller

 sources:
-- https://github.com/summerwind/actions-runner-controller
+- https://github.com/actions-runner-controller/actions-runner-controller

 maintainers:
-- name: summerwind
-  email: contact@summerwind.jp
-  url: https://github.com/summerwind
-- name: funkypenguin
-  email: davidy@funkypenguin.co.nz
-  url: https://www.funkypenguin.co.nz
+- name: actions-runner-controller
+  url: https://github.com/actions-runner-controller
charts/actions-runner-controller/README.md (new file, 81 lines)
@@ -0,0 +1,81 @@
## Docs

All additional docs are kept in the `docs/` folder; this README is solely for documenting the values.yaml keys and values

## Values

_The values are documented as of HEAD_

_Default values are the defaults set in the charts values.yaml, some properties have default configurations in the code for when the property is omitted or invalid_

| Key | Description | Default |
|-----|-------------|---------|
| `labels` | Set labels to apply to all resources in the chart | |
| `replicaCount` | Set the number of controller pods | 1 |
| `syncPeriod` | Set the period in which the controller reconciles the desired runners count | 10m |
| `githubAPICacheDuration` | Set the cache period for API calls | |
| `logLevel` | Set the log level of the controller container | |
| `authSecret.create` | Deploy the controller auth secret | true |
| `authSecret.name` | Set the name of the auth secret | controller-manager |
| `authSecret.github_app_id` | The ID of your GitHub App. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_app_installation_id` | The ID of your GitHub App installation. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_app_private_key` | The multiline string of your GitHub App's private key. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_token` | Your chosen GitHub PAT token. **This can't be set at the same time as the `authSecret.github_app_*`** | |
| `image.repository` | The "repository/image" of the controller container | summerwind/actions-runner-controller |
| `image.tag` | The tag of the controller container | |
| `image.dindSidecarRepositoryAndTag` | The "repository/image" of the dind sidecar container | docker:dind |
| `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
| `metrics.serviceMonitor` | Deploy serviceMonitor kind for use with prometheus-operator CRDs | false |
| `metrics.port` | Set port of metrics service | 8443 |
| `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
| `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
| `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | |
| `fullNameOverride` | Override the full resource names | |
| `nameOverride` | Override the resource name prefix | |
| `serviceAccount.annotations` | Set annotations for the service account | |
| `serviceAccount.create` | Deploy the controller pod under a service account | true |
| `podAnnotations` | Set annotations for the controller pod | |
| `podLabels` | Set labels for the controller pod | |
| `serviceAccount.name` | Set the name of the service account | |
| `securityContext` | Set the security context for each container in the controller pod | |
| `podSecurityContext` | Set the security context for the controller pod | |
| `service.port` | Set controller service ports | |
| `service.type` | Set controller service type | |
| `topologySpreadConstraints` | Set the controller pod topologySpreadConstraints | |
| `nodeSelector` | Set the controller pod nodeSelector | |
| `resources` | Set the controller pod resources | |
| `affinity` | Set the controller pod affinity rules | |
| `tolerations` | Set the controller pod tolerations | |
| `env` | Set environment variables for the controller container | |
| `priorityClassName` | Set the controller pod priorityClassName | |
| `scope.watchNamespace` | Tells the controller which namespace to watch if `scope.singleNamespace` is true | |
| `scope.singleNamespace` | Limit the controller to watch a single namespace | false |
| `githubWebhookServer.logLevel` | Set the log level of the githubWebhookServer container | |
| `githubWebhookServer.replicaCount` | Set the number of webhook server pods | 1 |
| `githubWebhookServer.enabled` | Deploy the webhook server pod | false |
| `githubWebhookServer.secret.create` | Deploy the webhook hook secret | true |
| `githubWebhookServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
| `githubWebhookServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
| `githubWebhookServer.imagePullSecrets` | Specifies the secret to be used when pulling the githubWebhookServer pod containers | |
| `githubWebhookServer.nameOveride` | Override the resource name prefix | |
| `githubWebhookServer.fullNameOveride` | Override the full resource names | |
| `githubWebhookServer.serviceAccount.create` | Deploy the githubWebhookServer under a service account | true |
| `githubWebhookServer.serviceAccount.annotations` | Set annotations for the service account | |
| `githubWebhookServer.serviceAccount.name` | Set the service account name | |
| `githubWebhookServer.podAnnotations` | Set annotations for the githubWebhookServer pod | |
| `githubWebhookServer.podLabels` | Set labels for the githubWebhookServer pod | |
| `githubWebhookServer.podSecurityContext` | Set the security context for the githubWebhookServer pod | |
| `githubWebhookServer.securityContext` | Set the security context for each container in the githubWebhookServer pod | |
| `githubWebhookServer.resources` | Set the githubWebhookServer pod resources | |
| `githubWebhookServer.topologySpreadConstraints` | Set the githubWebhookServer pod topologySpreadConstraints | |
| `githubWebhookServer.nodeSelector` | Set the githubWebhookServer pod nodeSelector | |
| `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
| `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
| `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
| `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
| `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort":"http", "protocol":"TCP", "name":"http"}]` |
| `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
| `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
| `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
| `githubWebhookServer.ingress.tls` | Set tls configuration for ingress | |
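Putting a few of the documented keys together, a hedged values.yaml fragment that enables the auth secret and the webhook server could look like the following; the token values are placeholders.

authSecret:
  create: true
  github_token: "ghp_XXXXXXXXXXXXXXXX"          # placeholder PAT
githubWebhookServer:
  enabled: true
  replicaCount: 1
  secret:
    create: true
    github_webhook_secret_token: "mysecret"     # placeholder webhook secret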
@@ -22,6 +22,9 @@ resources:
     cpu: 100m
     memory: 128Mi

+authSecret:
+  create: false
+
 # Set the following to true to create a dummy secret, allowing the manager pod to start
 # This is only useful in CI
 createDummySecret: true
@@ -18,6 +18,9 @@ spec:
   - JSONPath: .status.desiredReplicas
     name: Desired
     type: number
+  - JSONPath: .status.scheduledOverridesSummary
+    name: Schedule
+    type: string
   group: actions.summerwind.dev
   names:
     kind: HorizontalRunnerAutoscaler
@@ -48,6 +51,20 @@ spec:
       description: HorizontalRunnerAutoscalerSpec defines the desired state of
         HorizontalRunnerAutoscaler
       properties:
+        capacityReservations:
+          items:
+            description: CapacityReservation specifies the number of replicas
+              temporarily added to the scale target until ExpirationTime.
+            properties:
+              expirationTime:
+                format: date-time
+                type: string
+              name:
+                type: string
+              replicas:
+                type: integer
+            type: object
+          type: array
         maxReplicas:
           description: MinReplicas is the maximum number of replicas the deployment
             is allowed to scale
@@ -64,6 +81,11 @@ spec:
           items:
             type: string
           type: array
+        scaleDownAdjustment:
+          description: ScaleDownAdjustment is the number of runners removed
+            on scale-down. You can only specify either ScaleDownFactor or
+            ScaleDownAdjustment.
+          type: integer
         scaleDownFactor:
           description: ScaleDownFactor is the multiplicative factor applied
             to the current number of runners used to determine how many
@@ -73,6 +95,10 @@ spec:
           description: ScaleDownThreshold is the percentage of busy runners
             less than which will trigger the hpa to scale the runners down.
          type: string
+        scaleUpAdjustment:
+          description: ScaleUpAdjustment is the number of runners added
+            on scale-up. You can only specify either ScaleUpFactor or ScaleUpAdjustment.
+          type: integer
         scaleUpFactor:
           description: ScaleUpFactor is the multiplicative factor applied
             to the current number of runners used to determine how many
@@ -104,9 +130,129 @@ spec:
|
|||||||
name:
|
name:
|
||||||
type: string
|
type: string
|
||||||
type: object
|
type: object
|
||||||
|
scaleUpTriggers:
|
||||||
|
description: "ScaleUpTriggers is an experimental feature to increase
|
||||||
|
the desired replicas by 1 on each webhook requested received by the
|
||||||
|
webhookBasedAutoscaler. \n This feature requires you to also enable
|
||||||
|
and deploy the webhookBasedAutoscaler onto your cluster. \n Note that
|
||||||
|
the added runners remain until the next sync period at least, and
|
||||||
|
they may or may not be used by GitHub Actions depending on the timing.
|
||||||
|
They are intended to be used to gain \"resource slack\" immediately
|
||||||
|
after you receive a webhook from GitHub, so that you can loosely expect
|
||||||
|
MinReplicas runners to be always available."
|
||||||
|
items:
|
||||||
|
properties:
|
||||||
|
amount:
|
||||||
|
type: integer
|
||||||
|
duration:
|
||||||
|
type: string
|
||||||
|
githubEvent:
|
||||||
|
properties:
|
||||||
|
checkRun:
|
||||||
|
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
|
||||||
|
properties:
|
||||||
|
names:
|
||||||
|
description: Names is a list of GitHub Actions glob patterns.
|
||||||
|
Any check_run event whose name matches one of patterns
|
||||||
|
in the list can trigger autoscaling. Note that check_run
|
||||||
|
name seem to equal to the job name you've defined in
|
||||||
|
your actions workflow yaml file. So it is very likely
|
||||||
|
that you can utilize this to trigger depending on the
|
||||||
|
job.
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
type: array
|
||||||
|
status:
|
||||||
|
type: string
|
||||||
|
types:
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
type: array
|
||||||
|
type: object
|
||||||
|
pullRequest:
|
||||||
|
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
|
||||||
|
properties:
|
||||||
|
branches:
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
type: array
|
||||||
|
types:
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
type: array
|
||||||
|
type: object
|
||||||
|
push:
|
||||||
|
description: PushSpec is the condition for triggering scale-up
|
||||||
|
on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
|
||||||
|
type: object
|
||||||
|
type: object
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
+scheduledOverrides:
+description: ScheduledOverrides is the list of ScheduledOverride. It
+can be used to override a few fields of HorizontalRunnerAutoscalerSpec
+on schedule. The earlier a scheduled override is, the higher it is
+prioritized.
+items:
+description: ScheduledOverride can be used to override a few fields
+of HorizontalRunnerAutoscalerSpec on schedule. A schedule can optionally
+be recurring, so that the correspoding override happens every day,
+week, month, or year.
+properties:
+endTime:
+description: EndTime is the time at which the first override ends.
+format: date-time
+type: string
+minReplicas:
+description: MinReplicas is the number of runners while overriding.
+If omitted, it doesn't override minReplicas.
+minimum: 0
+nullable: true
+type: integer
+recurrenceRule:
+properties:
+frequency:
+description: Frequency is the name of a predefined interval
+of each recurrence. The valid values are "Daily", "Weekly",
+"Monthly", and "Yearly". If empty, the corresponding override
+happens only once.
+enum:
+- Daily
+- Weekly
+- Monthly
+- Yearly
+type: string
+untilTime:
+description: UntilTime is the time of the final recurrence.
+If empty, the schedule recurs forever.
+format: date-time
+type: string
+type: object
+startTime:
+description: StartTime is the time at which the first override
+starts.
+format: date-time
+type: string
+required:
+- endTime
+- startTime
+type: object
+type: array
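As a sketch of the new scheduledOverrides fields, a weekly weekend scale-down might be expressed as below. The field names follow the schema above; the timestamps and replica counts are illustrative only.

```yaml
spec:
  minReplicas: 2
  scheduledOverrides:
  # Scale down to zero during a recurring weekend window (times are illustrative).
  - startTime: "2021-05-01T00:00:00+09:00"
    endTime: "2021-05-03T00:00:00+09:00"
    recurrenceRule:
      frequency: Weekly      # one of Daily, Weekly, Monthly, Yearly
    minReplicas: 0
```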
 type: object
 status:
 properties:
+cacheEntries:
+items:
+properties:
+expirationTime:
+format: date-time
+type: string
+key:
+type: string
+value:
+type: integer
+type: object
+type: array
 desiredReplicas:
 description: DesiredReplicas is the total number of desired, non-terminated
 and latest pods to be set for the primary RunnerSet This doesn't include
@@ -114,6 +260,7 @@ spec:
 type: integer
 lastSuccessfulScaleOutTime:
 format: date-time
+nullable: true
 type: string
 observedGeneration:
 description: ObservedGeneration is the most recent generation observed
@@ -121,6 +268,11 @@ spec:
 which is updated on mutation by the API Server.
 format: int64
 type: integer
+scheduledOverridesSummary:
+description: ScheduledOverridesSummary is the summary of active and
+upcoming scheduled overrides to be shown in e.g. a column of a `kubectl
+get hra` output for observability.
+type: string
 type: object
 type: object
 version: v1alpha1

@@ -10,12 +10,18 @@ spec:
 - JSONPath: .spec.replicas
 name: Desired
 type: number
-- JSONPath: .status.availableReplicas
+- JSONPath: .status.replicas
 name: Current
 type: number
-- JSONPath: .status.readyReplicas
+- JSONPath: .status.updatedReplicas
-name: Ready
+name: Up-To-Date
 type: number
+- JSONPath: .status.availableReplicas
+name: Available
+type: number
+- JSONPath: .metadata.creationTimestamp
+name: Age
+type: date
 group: actions.summerwind.dev
 names:
 kind: RunnerDeployment
@@ -38,11 +44,42 @@ spec:
 metadata:
 type: object
 spec:
-description: RunnerReplicaSetSpec defines the desired state of RunnerDeployment
+description: RunnerDeploymentSpec defines the desired state of RunnerDeployment
 properties:
 replicas:
 nullable: true
 type: integer
+selector:
+description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.
+nullable: true
+properties:
+matchExpressions:
+description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+items:
+description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+properties:
+key:
+description: key is the label key that the selector applies to.
+type: string
+operator:
+description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+type: string
+values:
+description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+items:
+type: string
+type: array
+required:
+- key
+- operator
+type: object
+type: array
+matchLabels:
+additionalProperties:
+type: string
+description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+type: object
+type: object
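The new selector field follows the standard Kubernetes label selector shape. A sketch of how it might be populated on a RunnerDeployment (the labels and values are illustrative, not taken from the diff):

```yaml
spec:
  selector:
    matchLabels:
      pool: default                # illustrative label
    matchExpressions:
    - key: environment             # illustrative key
      operator: In
      values: ["staging", "production"]
```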
 template:
 properties:
 metadata:
@@ -402,6 +439,38 @@ spec:
 type: array
 dockerEnabled:
 type: boolean
+dockerMTU:
+format: int64
+type: integer
+dockerRegistryMirror:
+type: string
+dockerVolumeMounts:
+items:
+description: VolumeMount describes a mounting of a Volume within a container.
+properties:
+mountPath:
+description: Path within the container at which the volume should be mounted. Must not contain ':'.
+type: string
+mountPropagation:
+description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+type: string
+name:
+description: This must match the Name of a Volume.
+type: string
+readOnly:
+description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+type: boolean
+subPath:
+description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+type: string
+subPathExpr:
+description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
+type: string
+required:
+- mountPath
+- name
+type: object
+type: array
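For reference, a runner template exercising the newly added docker-related fields might look like the following sketch. The field names (dockerMTU, dockerRegistryMirror, dockerVolumeMounts) are from the hunk above; the MTU, mirror URL, volume name, and mount path are illustrative assumptions.

```yaml
spec:
  template:
    spec:
      dockerMTU: 1400                                    # illustrative MTU
      dockerRegistryMirror: https://mirror.example.com   # illustrative mirror URL
      dockerVolumeMounts:
      - name: docker-extra               # must match the Name of a Volume in the same spec
        mountPath: /var/lib/docker/extra
        readOnly: false
```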
 dockerdContainerResources:
 description: ResourceRequirements describes the compute resource requirements.
 properties:
@@ -426,6 +495,9 @@ spec:
 type: object
 dockerdWithinRunnerContainer:
 type: boolean
+enterprise:
+pattern: ^[^/]+$
+type: string
 env:
 items:
 description: EnvVar represents an environment variable present in a Container.
@@ -534,6 +606,8 @@ spec:
 type: object
 type: object
 type: array
+ephemeral:
+type: boolean
 ephemeralContainers:
 items:
 description: An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.
@@ -543,6 +617,20 @@ spec:
 type: array
 group:
 type: string
+hostAliases:
+items:
+description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+properties:
+hostnames:
+description: Hostnames for the above IP address.
+items:
+type: string
+type: array
+ip:
+description: IP address of the host file entry.
+type: string
+type: object
+type: array
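The hostAliases addition mirrors the core pod field of the same name; a minimal sketch (the IP and hostname are illustrative):

```yaml
spec:
  template:
    spec:
      hostAliases:
      - ip: "10.0.0.10"                   # illustrative address
        hostnames:
        - "internal-git.example.com"      # illustrative hostname added to /etc/hosts
```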
 image:
 type: string
 imagePullPolicy:
@@ -600,6 +688,9 @@ spec:
 description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
 type: object
 type: object
+runtimeClassName:
+description: 'RuntimeClassName is the container runtime configuration that containers should run under. More info: https://kubernetes.io/docs/concepts/containers/runtime-class'
+type: string
 securityContext:
 description: PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.
 properties:
@@ -731,6 +822,12 @@ spec:
 - name
 type: object
 type: array
+volumeSizeLimit:
+anyOf:
+- type: integer
+- type: string
+pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+x-kubernetes-int-or-string: true
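volumeSizeLimit is declared int-or-string, so it accepts Kubernetes quantity notation. A sketch, with an illustrative size:

```yaml
spec:
  template:
    spec:
      volumeSizeLimit: 10Gi   # illustrative quantity; plain integers are also accepted
```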
 volumes:
 items:
 description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -1543,15 +1640,20 @@ spec:
 status:
 properties:
 availableReplicas:
+description: AvailableReplicas is the total number of available runners which have been successfully registered to GitHub and still running. This corresponds to the sum of status.availableReplicas of all the runner replica sets.
 type: integer
 desiredReplicas:
-description: Replicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
+description: DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
 type: integer
 readyReplicas:
+description: ReadyReplicas is the total number of available runners which have been successfully registered to GitHub and still running. This corresponds to the sum of status.readyReplicas of all the runner replica sets.
+type: integer
+replicas:
+description: Replicas is the total number of replicas
+type: integer
+updatedReplicas:
+description: ReadyReplicas is the total number of available runners which have been successfully registered to GitHub and still running. This corresponds to status.replicas of the runner replica set that has the desired template hash.
 type: integer
-required:
-- availableReplicas
-- readyReplicas
 type: object
 type: object
 version: v1alpha1

@@ -10,12 +10,15 @@ spec:
 - JSONPath: .spec.replicas
 name: Desired
 type: number
-- JSONPath: .status.availableReplicas
+- JSONPath: .status.replicas
 name: Current
 type: number
 - JSONPath: .status.readyReplicas
 name: Ready
 type: number
+- JSONPath: .metadata.creationTimestamp
+name: Age
+type: date
 group: actions.summerwind.dev
 names:
 kind: RunnerReplicaSet
@@ -43,6 +46,37 @@ spec:
 replicas:
 nullable: true
 type: integer
+selector:
+description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.
+nullable: true
+properties:
+matchExpressions:
+description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+items:
+description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+properties:
+key:
+description: key is the label key that the selector applies to.
+type: string
+operator:
+description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+type: string
+values:
+description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+items:
+type: string
+type: array
+required:
+- key
+- operator
+type: object
+type: array
+matchLabels:
+additionalProperties:
+type: string
+description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+type: object
+type: object
 template:
 properties:
 metadata:
@@ -402,6 +436,38 @@ spec:
 type: array
 dockerEnabled:
 type: boolean
+dockerMTU:
+format: int64
+type: integer
+dockerRegistryMirror:
+type: string
+dockerVolumeMounts:
+items:
+description: VolumeMount describes a mounting of a Volume within a container.
+properties:
+mountPath:
+description: Path within the container at which the volume should be mounted. Must not contain ':'.
+type: string
+mountPropagation:
+description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+type: string
+name:
+description: This must match the Name of a Volume.
+type: string
+readOnly:
+description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+type: boolean
+subPath:
+description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+type: string
+subPathExpr:
+description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
+type: string
+required:
+- mountPath
+- name
+type: object
+type: array
 dockerdContainerResources:
 description: ResourceRequirements describes the compute resource requirements.
 properties:
@@ -426,6 +492,9 @@ spec:
 type: object
 dockerdWithinRunnerContainer:
 type: boolean
+enterprise:
+pattern: ^[^/]+$
+type: string
 env:
 items:
 description: EnvVar represents an environment variable present in a Container.
@@ -534,6 +603,8 @@ spec:
 type: object
 type: object
 type: array
+ephemeral:
+type: boolean
 ephemeralContainers:
 items:
 description: An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.
@@ -543,6 +614,20 @@ spec:
 type: array
 group:
 type: string
+hostAliases:
+items:
+description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+properties:
+hostnames:
+description: Hostnames for the above IP address.
+items:
+type: string
+type: array
+ip:
+description: IP address of the host file entry.
+type: string
+type: object
+type: array
 image:
 type: string
 imagePullPolicy:
@@ -600,6 +685,9 @@ spec:
 description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
 type: object
 type: object
+runtimeClassName:
+description: 'RuntimeClassName is the container runtime configuration that containers should run under. More info: https://kubernetes.io/docs/concepts/containers/runtime-class'
+type: string
 securityContext:
 description: PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.
 properties:
@@ -731,6 +819,12 @@ spec:
 - name
 type: object
 type: array
+volumeSizeLimit:
+anyOf:
+- type: integer
+- type: string
+pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+x-kubernetes-int-or-string: true
 volumes:
 items:
 description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -1543,8 +1637,13 @@ spec:
 status:
 properties:
 availableReplicas:
+description: AvailableReplicas is the number of runners that are created and Runnning. This is currently same as ReadyReplicas but perserved for future use.
 type: integer
 readyReplicas:
+description: ReadyReplicas is the number of runners that are created and Runnning.
+type: integer
+replicas:
+description: Replicas is the number of runners that are created and still being managed by this runner replica set.
 type: integer
 required:
 - availableReplicas

@@ -7,6 +7,9 @@ metadata:
 name: runners.actions.summerwind.dev
spec:
 additionalPrinterColumns:
+- JSONPath: .spec.enterprise
+name: Enterprise
+type: string
 - JSONPath: .spec.organization
 name: Organization
 type: string
@@ -19,6 +22,9 @@ spec:
 - JSONPath: .status.phase
 name: Status
 type: string
+- JSONPath: .metadata.creationTimestamp
+name: Age
+type: date
 group: actions.summerwind.dev
 names:
 kind: Runner
@@ -395,6 +401,38 @@ spec:
 type: array
 dockerEnabled:
 type: boolean
+dockerMTU:
+format: int64
+type: integer
+dockerRegistryMirror:
+type: string
+dockerVolumeMounts:
+items:
+description: VolumeMount describes a mounting of a Volume within a container.
+properties:
+mountPath:
+description: Path within the container at which the volume should be mounted. Must not contain ':'.
+type: string
+mountPropagation:
+description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+type: string
+name:
+description: This must match the Name of a Volume.
+type: string
+readOnly:
+description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+type: boolean
+subPath:
+description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+type: string
+subPathExpr:
+description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
+type: string
+required:
+- mountPath
+- name
+type: object
+type: array
 dockerdContainerResources:
 description: ResourceRequirements describes the compute resource requirements.
 properties:
@@ -419,6 +457,9 @@ spec:
 type: object
 dockerdWithinRunnerContainer:
 type: boolean
+enterprise:
+pattern: ^[^/]+$
+type: string
 env:
 items:
 description: EnvVar represents an environment variable present in a Container.
@@ -527,6 +568,8 @@ spec:
 type: object
 type: object
 type: array
+ephemeral:
+type: boolean
 ephemeralContainers:
 items:
 description: An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.
@@ -536,6 +579,20 @@ spec:
 type: array
 group:
 type: string
+hostAliases:
+items:
+description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+properties:
+hostnames:
+description: Hostnames for the above IP address.
+items:
+type: string
+type: array
+ip:
+description: IP address of the host file entry.
+type: string
+type: object
+type: array
 image:
 type: string
 imagePullPolicy:
@@ -593,6 +650,9 @@ spec:
 description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
 type: object
 type: object
+runtimeClassName:
+description: 'RuntimeClassName is the container runtime configuration that containers should run under. More info: https://kubernetes.io/docs/concepts/containers/runtime-class'
+type: string
 securityContext:
 description: PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.
 properties:
@@ -724,6 +784,12 @@ spec:
 - name
 type: object
 type: array
+volumeSizeLimit:
+anyOf:
+- type: integer
+- type: string
+pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+x-kubernetes-int-or-string: true
 volumes:
 items:
 description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -1532,6 +1598,10 @@ spec:
 status:
 description: RunnerStatus defines the observed state of Runner
 properties:
+lastRegistrationCheckTime:
+format: date-time
+nullable: true
+type: string
 message:
 type: string
 phase:
@@ -1541,6 +1611,8 @@ spec:
 registration:
 description: RunnerStatusRegistration contains runner registration status
 properties:
+enterprise:
+type: string
 expiresAt:
 format: date-time
 type: string
@@ -1558,11 +1630,6 @@ spec:
 - expiresAt
 - token
 type: object
-required:
-- message
-- phase
-- reason
-- registration
 type: object
 type: object
 version: v1alpha1

charts/actions-runner-controller/docs/UPGRADING.md (new file)
@@ -0,0 +1,40 @@
+## Upgrading
+
+This project makes extensive use of CRDs to provide much of its functionality. Helm unfortunately does not support [managing](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/) CRDs by design:
+
+_The full breakdown as to how they came to this decision and why they have taken the approach they have for dealing with CRDs can be found in [Helm Improvement Proposal 11](https://github.com/helm/community/blob/main/hips/hip-0011.md)_
+
+```
+There is no support at this time for upgrading or deleting CRDs using Helm. This was an explicit decision after much
+community discussion due to the danger for unintentional data loss. Furthermore, there is currently no community
+consensus around how to handle CRDs and their lifecycle. As this evolves, Helm will add support for those use cases.
+```
+
+Helm will do an initial install of CRDs but it will not touch them afterwards (update or delete).
+
+Additionally, because the project leverages CRDs so extensively you **MUST** run the matching controller app container with its matching CRDs i.e. always redeploy your CRDs if you are changing the app version.
+
+Due to the above you can't just do a `helm upgrade` to release the latest version of the chart, the best practice steps are recorded below:
+
+## Steps
+
+1. Upgrade CRDs
+
+```shell
+# REMEMBER TO UPDATE THE CHART_VERSION TO RELEVANT CHART VERISON!!!!
+CHART_VERSION=0.11.0
+
+curl -L https://github.com/actions-runner-controller/actions-runner-controller/releases/download/actions-runner-controller-${CHART_VERSION}/actions-runner-controller-${CHART_VERSION}.tgz | tar zxv --strip 1 actions-runner-controller/crds
+
+kubectl apply -f crds/
+```
+
+2. Upgrade the Helm release
+
+```shell
+helm upgrade --install \
+--namespace actions-runner-system \
+--version ${CHART_VERSION} \
+actions-runner-controller/actions-runner-controller \
+actions-runner-controller
+```

@@ -1,8 +1,8 @@
 1. Get the application URL by running these commands:
-{{- if .Values.ingress.enabled }}
+{{- if .Values.githubWebhookServer.ingress.enabled }}
-{{- range $host := .Values.ingress.hosts }}
+{{- range $host := .Values.githubWebhookServer.ingress.hosts }}
 {{- range .paths }}
-http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
+http{{ if $.Values.githubWebhookServer.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
 {{- end }}
 {{- end }}
 {{- else if contains "NodePort" .Values.service.type }}

@@ -0,0 +1,60 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "actions-runner-controller-github-webhook-server.name" -}}
+{{- default .Chart.Name .Values.githubWebhookServer.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{- define "actions-runner-controller-github-webhook-server.instance" -}}
+{{- printf "%s-%s" .Release.Name "github-webhook-server" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "actions-runner-controller-github-webhook-server.fullname" -}}
+{{- if .Values.githubWebhookServer.fullnameOverride }}
+{{- .Values.githubWebhookServer.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.githubWebhookServer.nameOverride }}
+{{- $instance := include "actions-runner-controller-github-webhook-server.instance" . }}
+{{- if contains $name $instance }}
+{{- $instance | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s-%s" .Release.Name $name "github-webhook-server" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "actions-runner-controller-github-webhook-server.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "actions-runner-controller-github-webhook-server.name" . }}
+app.kubernetes.io/instance: {{ include "actions-runner-controller-github-webhook-server.instance" . }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "actions-runner-controller-github-webhook-server.serviceAccountName" -}}
+{{- if .Values.githubWebhookServer.serviceAccount.create }}
+{{- default (include "actions-runner-controller-github-webhook-server.fullname" .) .Values.githubWebhookServer.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.githubWebhookServer.serviceAccount.name }}
+{{- end }}
+{{- end }}
+
+{{- define "actions-runner-controller-github-webhook-server.secretName" -}}
+{{- default (include "actions-runner-controller-github-webhook-server.fullname" .) .Values.githubWebhookServer.secret.name }}
+{{- end }}
+
+{{- define "actions-runner-controller-github-webhook-server.roleName" -}}
+{{- include "actions-runner-controller-github-webhook-server.fullname" . }}
+{{- end }}
+
+{{- define "actions-runner-controller-github-webhook-server.serviceMonitorName" -}}
+{{- include "actions-runner-controller-github-webhook-server.fullname" . | trunc 47 }}-service-monitor
+{{- end }}
@@ -64,6 +64,10 @@ Create the name of the service account to use
 {{- end }}
 {{- end }}
 
+{{- define "actions-runner-controller.secretName" -}}
+{{- default (include "actions-runner-controller.fullname" .) .Values.authSecret.name -}}
+{{- end }}
+
 {{- define "actions-runner-controller.leaderElectionRoleName" -}}
 {{- include "actions-runner-controller.fullname" . }}-leader-election
 {{- end }}
@@ -85,11 +89,15 @@ Create the name of the service account to use
 {{- end }}
 
 {{- define "actions-runner-controller.webhookServiceName" -}}
-{{- include "actions-runner-controller.fullname" . }}-webhook
+{{- include "actions-runner-controller.fullname" . | trunc 55 }}-webhook
 {{- end }}
 
-{{- define "actions-runner-controller.authProxyServiceName" -}}
+{{- define "actions-runner-controller.metricsServiceName" -}}
-{{- include "actions-runner-controller.fullname" . }}-metrics-service
+{{- include "actions-runner-controller.fullname" . | trunc 47 }}-metrics-service
+{{- end }}
+
+{{- define "actions-runner-controller.serviceMonitorName" -}}
+{{- include "actions-runner-controller.fullname" . | trunc 47 }}-service-monitor
 {{- end }}
 
 {{- define "actions-runner-controller.selfsignedIssuerName" -}}
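The added `trunc 55` / `trunc 47` calls appear to keep the rendered names within the 63-character Kubernetes name limit once the fixed suffix is appended. A sketch of the arithmetic and an illustrative rendered name (the release name is hypothetical, not from this diff):

```yaml
# 55 + len("-webhook")         = 55 + 8  = 63
# 47 + len("-metrics-service") = 47 + 16 = 63
# 47 + len("-service-monitor") = 47 + 16 = 63
metadata:
  name: example-arc-release-metrics-service   # illustrative rendered Service name
```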
@@ -1,3 +1,4 @@
+{{- if .Values.metrics.proxy.enabled }}
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
@@ -11,3 +12,4 @@ rules:
 resources:
 - subjectaccessreviews
 verbs: ["create"]
+{{- end }}

@@ -1,3 +1,4 @@
+{{- if .Values.metrics.proxy.enabled }}
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
@@ -10,3 +11,4 @@ subjects:
 - kind: ServiceAccount
 name: {{ include "actions-runner-controller.serviceAccountName" . }}
 namespace: {{ .Release.Namespace }}
+{{- end }}

@@ -5,7 +5,7 @@ apiVersion: cert-manager.io/v1
 kind: Issuer
 metadata:
 name: {{ include "actions-runner-controller.selfsignedIssuerName" . }}
-namespace: {{ .Namespace }}
+namespace: {{ .Release.Namespace }}
 spec:
 selfSigned: {}
 ---
@@ -13,7 +13,7 @@ apiVersion: cert-manager.io/v1
 kind: Certificate
 metadata:
 name: {{ include "actions-runner-controller.servingCertName" . }}
-namespace: {{ .Namespace }}
+namespace: {{ .Release.Namespace }}
 spec:
 dnsNames:
 - {{ include "actions-runner-controller.webhookServiceName" . }}.{{ .Release.Namespace }}.svc

@@ -3,12 +3,12 @@ kind: Service
 metadata:
 labels:
 {{- include "actions-runner-controller.labels" . | nindent 4 }}
-name: {{ include "actions-runner-controller.authProxyServiceName" . }}
+name: {{ include "actions-runner-controller.metricsServiceName" . }}
 namespace: {{ .Release.Namespace }}
 spec:
 ports:
-- name: https
+- name: metrics-port
-port: 8443
+port: {{ .Values.metrics.port }}
-targetPort: https
+targetPort: metrics-port
 selector:
 {{- include "actions-runner-controller.selectorLabels" . | nindent 4 }}

@@ -0,0 +1,15 @@
+{{- if .Values.metrics.serviceMonitor }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+labels:
+{{- include "actions-runner-controller.labels" . | nindent 4 }}
+name: {{ include "actions-runner-controller.serviceMonitorName" . }}
+spec:
+endpoints:
+- path: /metrics
+port: metrics-port
+selector:
+matchLabels:
+{{- include "actions-runner-controller.selectorLabels" . | nindent 6 }}
+{{- end }}
@@ -6,6 +6,7 @@ metadata:
 labels:
 {{- include "actions-runner-controller.labels" . | nindent 4 }}
 spec:
+replicas: {{ .Values.replicaCount }}
 selector:
 matchLabels:
 {{- include "actions-runner-controller.selectorLabels" . | nindent 6 }}
@@ -17,6 +18,9 @@ spec:
 {{- end }}
 labels:
 {{- include "actions-runner-controller.selectorLabels" . | nindent 8 }}
+{{- with .Values.podLabels }}
+{{- toYaml . | nindent 8 }}
+{{- end }}
 spec:
 {{- with .Values.imagePullSecrets }}
 imagePullSecrets:
@@ -30,10 +34,21 @@ spec:
 {{- end }}
 containers:
 - args:
-- "--metrics-addr=127.0.0.1:8080"
+{{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
+{{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
+- "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
 - "--enable-leader-election"
 - "--sync-period={{ .Values.syncPeriod }}"
 - "--docker-image={{ .Values.image.dindSidecarRepositoryAndTag }}"
+{{- if .Values.scope.singleNamespace }}
+- "--watch-namespace={{ default .Release.Namespace .Values.scope.watchNamespace }}"
+{{- end }}
+{{- if .Values.githubAPICacheDuration }}
+- "--github-api-cache-duration={{ .Values.githubAPICacheDuration }}"
+{{- end }}
+{{- if .Values.logLevel }}
+- "--log-level={{ .Values.logLevel }}"
+{{- end }}
 command:
 - "/manager"
 env:
@@ -41,19 +56,19 @@ spec:
 valueFrom:
 secretKeyRef:
 key: github_token
-name: controller-manager
+name: {{ include "actions-runner-controller.secretName" . }}
 optional: true
 - name: GITHUB_APP_ID
 valueFrom:
 secretKeyRef:
 key: github_app_id
-name: controller-manager
+name: {{ include "actions-runner-controller.secretName" . }}
 optional: true
 - name: GITHUB_APP_INSTALLATION_ID
 valueFrom:
 secretKeyRef:
 key: github_app_installation_id
-name: controller-manager
+name: {{ include "actions-runner-controller.secretName" . }}
 optional: true
 - name: GITHUB_APP_PRIVATE_KEY
 value: /etc/actions-runner-controller/github_app_private_key
@@ -68,39 +83,46 @@ spec:
 - containerPort: 9443
 name: webhook-server
 protocol: TCP
+{{- if not .Values.metrics.proxy.enabled }}
+- containerPort: {{ .Values.metrics.port }}
+name: metrics-port
+protocol: TCP
+{{- end }}
 resources:
 {{- toYaml .Values.resources | nindent 12 }}
 securityContext:
 {{- toYaml .Values.securityContext | nindent 12 }}
 volumeMounts:
 - mountPath: "/etc/actions-runner-controller"
-name: controller-manager
+name: secret
 readOnly: true
 - mountPath: /tmp
 name: tmp
 - mountPath: /tmp/k8s-webhook-server/serving-certs
 name: cert
 readOnly: true
+{{- if .Values.metrics.proxy.enabled }}
 - args:
-- "--secure-listen-address=0.0.0.0:8443"
+- "--secure-listen-address=0.0.0.0:{{ .Values.metrics.port }}"
 - "--upstream=http://127.0.0.1:8080/"
 - "--logtostderr=true"
 - "--v=10"
-image: "{{ .Values.kube_rbac_proxy.image.repository }}:{{ .Values.kube_rbac_proxy.image.tag }}"
+image: "{{ .Values.metrics.proxy.image.repository }}:{{ .Values.metrics.proxy.image.tag }}"
 name: kube-rbac-proxy
 imagePullPolicy: {{ .Values.image.pullPolicy }}
 ports:
-- containerPort: 8443
+- containerPort: {{ .Values.metrics.port }}
-name: https
+name: metrics-port
 resources:
 {{- toYaml .Values.resources | nindent 12 }}
 securityContext:
 {{- toYaml .Values.securityContext | nindent 12 }}
+{{- end }}
 terminationGracePeriodSeconds: 10
 volumes:
-- name: controller-manager
+- name: secret
 secret:
-secretName: controller-manager
+secretName: {{ include "actions-runner-controller.secretName" . }}
 - name: cert
 secret:
 defaultMode: 420
@@ -119,3 +141,7 @@ spec:
 tolerations:
 {{- toYaml . | nindent 8 }}
 {{- end }}
+{{- with .Values.topologySpreadConstraints }}
+topologySpreadConstraints:
+{{- toYaml . | nindent 8 }}
+{{- end }}
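The deployment now derives both the metrics bind address and the exposed port from `.Values.metrics` instead of hard-coded values. A values snippet exercising these template paths might look like the sketch below; the keys are the ones referenced in the templates above, while the port number, image repository, and tag are illustrative assumptions.

```yaml
metrics:
  serviceMonitor: true    # renders the new ServiceMonitor manifest
  port: 8443              # illustrative; used for the Service, containerPort and rbac-proxy listen address
  proxy:
    enabled: true         # keep kube-rbac-proxy in front of the metrics endpoint
    image:
      repository: quay.io/brancz/kube-rbac-proxy   # illustrative image
      tag: v0.10.0                                 # illustrative tag
```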

@@ -0,0 +1,108 @@
+{{- if .Values.githubWebhookServer.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+name: {{ include "actions-runner-controller-github-webhook-server.fullname" . }}
+namespace: {{ .Release.Namespace }}
+labels:
+{{- include "actions-runner-controller.labels" . | nindent 4 }}
+spec:
+replicas: {{ .Values.githubWebhookServer.replicaCount }}
+selector:
+matchLabels:
+{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 6 }}
+template:
+metadata:
+{{- with .Values.githubWebhookServer.podAnnotations }}
+annotations:
+{{- toYaml . | nindent 8 }}
+{{- end }}
+labels:
+{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 8 }}
+{{- with .Values.githubWebhookServer.podLabels }}
+{{- toYaml . | nindent 8 }}
+{{- end }}
+spec:
+{{- with .Values.githubWebhookServer.imagePullSecrets }}
+imagePullSecrets:
+{{- toYaml . | nindent 8 }}
+{{- end }}
+serviceAccountName: {{ include "actions-runner-controller-github-webhook-server.serviceAccountName" . }}
+securityContext:
+{{- toYaml .Values.githubWebhookServer.podSecurityContext | nindent 8 }}
+{{- with .Values.githubWebhookServer.priorityClassName }}
+priorityClassName: "{{ . }}"
+{{- end }}
+containers:
+- args:
+{{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
+{{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
+- "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
+- "--sync-period={{ .Values.githubWebhookServer.syncPeriod }}"
+{{- if .Values.githubWebhookServer.logLevel }}
+- "--log-level={{ .Values.githubWebhookServer.logLevel }}"
+{{- end }}
+command:
+- "/github-webhook-server"
+env:
+- name: GITHUB_WEBHOOK_SECRET_TOKEN
+valueFrom:
+secretKeyRef:
+key: github_webhook_secret_token
+name: {{ include "actions-runner-controller-github-webhook-server.secretName" . }}
+optional: true
+{{- range $key, $val := .Values.githubWebhookServer.env }}
+- name: {{ $key }}
+value: {{ $val | quote }}
+{{- end }}
+image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
+name: github-webhook-server
+imagePullPolicy: {{ .Values.image.pullPolicy }}
+ports:
+- containerPort: 8000
+name: http
+protocol: TCP
+{{- if not .Values.metrics.proxy.enabled }}
+- containerPort: {{ .Values.metrics.port }}
+name: metrics-port
+protocol: TCP
+{{- end }}
+resources:
+{{- toYaml .Values.githubWebhookServer.resources | nindent 12 }}
+securityContext:
+{{- toYaml .Values.githubWebhookServer.securityContext | nindent 12 }}
+{{- if .Values.metrics.proxy.enabled }}
+- args:
+- "--secure-listen-address=0.0.0.0:{{ .Values.metrics.port }}"
+- "--upstream=http://127.0.0.1:8080/"
+- "--logtostderr=true"
+- "--v=10"
+image: "{{ .Values.metrics.proxy.image.repository }}:{{ .Values.metrics.proxy.image.tag }}"
+name: kube-rbac-proxy
+imagePullPolicy: {{ .Values.image.pullPolicy }}
+ports:
+- containerPort: {{ .Values.metrics.port }}
+name: metrics-port
+resources:
+{{- toYaml .Values.resources | nindent 12 }}
+securityContext:
+{{- toYaml .Values.securityContext | nindent 12 }}
+{{- end }}
+terminationGracePeriodSeconds: 10
+{{- with .Values.githubWebhookServer.nodeSelector }}
+nodeSelector:
+{{- toYaml . | nindent 8 }}
+{{- end }}
+{{- with .Values.githubWebhookServer.affinity }}
+affinity:
+{{- toYaml . | nindent 8 }}
+{{- end }}
+{{- with .Values.githubWebhookServer.tolerations }}
+tolerations:
+{{- toYaml . | nindent 8 }}
+{{- end }}
+{{- with .Values.githubWebhookServer.topologySpreadConstraints }}
+topologySpreadConstraints:
+{{- toYaml . | nindent 8 }}
+{{- end }}
+{{- end }}
@@ -0,0 +1,41 @@
+{{- if .Values.githubWebhookServer.ingress.enabled -}}
+{{- $fullName := include "actions-runner-controller-github-webhook-server.fullname" . -}}
+{{- $svcPort := (index .Values.githubWebhookServer.service.ports 0).port -}}
+{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+name: {{ $fullName }}
+labels:
+{{- include "actions-runner-controller.labels" . | nindent 4 }}
+{{- with .Values.githubWebhookServer.ingress.annotations }}
+annotations:
+{{- toYaml . | nindent 4 }}
+{{- end }}
+spec:
+{{- if .Values.githubWebhookServer.ingress.tls }}
+tls:
+{{- range .Values.githubWebhookServer.ingress.tls }}
+- hosts:
+{{- range .hosts }}
+- {{ . | quote }}
+{{- end }}
+secretName: {{ .secretName }}
+{{- end }}
+{{- end }}
+rules:
+{{- range .Values.githubWebhookServer.ingress.hosts }}
+- host: {{ .host | quote }}
+http:
+paths:
+{{- range .paths }}
+- path: {{ .path }}
+backend:
+serviceName: {{ $fullName }}
+servicePort: {{ $svcPort }}
+{{- end }}
+{{- end }}
+{{- end }}
@@ -0,0 +1,70 @@
+{{- if .Values.githubWebhookServer.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+creationTimestamp: null
+name: {{ include "actions-runner-controller-github-webhook-server.roleName" . }}
+rules:
+- apiGroups:
+- actions.summerwind.dev
+resources:
+- horizontalrunnerautoscalers
+verbs:
+- get
+- list
+- patch
+- update
+- watch
+- apiGroups:
+- actions.summerwind.dev
+resources:
+- horizontalrunnerautoscalers/finalizers
+verbs:
+- create
+- delete
+- get
+- list
+- patch
+- update
+- watch
+- apiGroups:
+- actions.summerwind.dev
+resources:
+- horizontalrunnerautoscalers/status
+verbs:
+- get
+- patch
+- update
+- apiGroups:
+- actions.summerwind.dev
+resources:
+- runnerdeployments
+verbs:
+- create
+- delete
+- get
+- list
+- patch
+- update
+- watch
+- apiGroups:
+- actions.summerwind.dev
+resources:
+- runnerdeployments/finalizers
+verbs:
+- create
+- delete
+- get
+- list
+- patch
+- update
+- watch
+- apiGroups:
+- actions.summerwind.dev
+resources:
+- runnerdeployments/status
+verbs:
+- get
+- patch
+- update
+{{- end }}
@@ -0,0 +1,14 @@
+{{- if .Values.githubWebhookServer.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+name: {{ include "actions-runner-controller-github-webhook-server.roleName" . }}
+roleRef:
+apiGroup: rbac.authorization.k8s.io
+kind: ClusterRole
+name: {{ include "actions-runner-controller-github-webhook-server.roleName" . }}
+subjects:
+- kind: ServiceAccount
+name: {{ include "actions-runner-controller-github-webhook-server.serviceAccountName" . }}
+namespace: {{ .Release.Namespace }}
+{{- end }}
@@ -0,0 +1,16 @@
+{{- if .Values.githubWebhookServer.enabled }}
+{{- if .Values.githubWebhookServer.secret.create }}
+apiVersion: v1
+kind: Secret
+metadata:
+name: {{ include "actions-runner-controller-github-webhook-server.secretName" . }}
+namespace: {{ .Release.Namespace }}
+labels:
+{{- include "actions-runner-controller.labels" . | nindent 4 }}
+type: Opaque
+data:
+{{- if .Values.githubWebhookServer.secret.github_webhook_secret_token }}
+github_webhook_secret_token: {{ .Values.githubWebhookServer.secret.github_webhook_secret_token | toString | b64enc }}
+{{- end }}
+{{- end }}
+{{- end }}
@@ -0,0 +1,22 @@
+{{- if .Values.githubWebhookServer.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+name: {{ include "actions-runner-controller-github-webhook-server.fullname" . }}
+namespace: {{ .Release.Namespace }}
+labels:
+{{- include "actions-runner-controller.labels" . | nindent 4 }}
+spec:
+type: {{ .Values.githubWebhookServer.service.type }}
+ports:
+{{ range $_, $port := .Values.githubWebhookServer.service.ports -}}
+- {{ $port | toYaml | nindent 6 }}
+{{- end }}
+{{- if .Values.metrics.serviceMonitor }}
+- name: metrics-port
+port: {{ .Values.metrics.port }}
+targetPort: metrics-port
+{{- end }}
+selector:
+{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 4 }}
+{{- end }}
@@ -0,0 +1,15 @@
+{{- if and .Values.githubWebhookServer.enabled .Values.metrics.serviceMonitor }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+labels:
+{{- include "actions-runner-controller.labels" . | nindent 4 }}
+name: {{ include "actions-runner-controller-github-webhook-server.serviceMonitorName" . }}
+spec:
+endpoints:
+- path: /metrics
+port: metrics-port
+selector:
+matchLabels:
+{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 6 }}
+{{- end }}
@@ -0,0 +1,15 @@
+{{- if .Values.githubWebhookServer.enabled -}}
+{{- if .Values.githubWebhookServer.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+name: {{ include "actions-runner-controller-github-webhook-server.serviceAccountName" . }}
+namespace: {{ .Release.Namespace }}
+labels:
+{{- include "actions-runner-controller.labels" . | nindent 4 }}
+{{- with .Values.githubWebhookServer.serviceAccount.annotations }}
+annotations:
+{{- toYaml . | nindent 4 }}
+{{- end }}
+{{- end }}
+{{- end }}
@@ -1,14 +1,23 @@
-{{- if or .Values.authSecret.enabled }}
+{{- if .Values.authSecret.create }}
 apiVersion: v1
 kind: Secret
 metadata:
-name: controller-manager
+name: {{ include "actions-runner-controller.secretName" . }}
 namespace: {{ .Release.Namespace }}
 labels:
 {{- include "actions-runner-controller.labels" . | nindent 4 }}
 type: Opaque
 data:
-{{- range $k, $v := .Values.authSecret }}
-{{ $k }}: {{ $v | toString | b64enc }}
+{{- if .Values.authSecret.github_app_id }}
+github_app_id: {{ .Values.authSecret.github_app_id | toString | b64enc }}
+{{- end }}
+{{- if .Values.authSecret.github_app_installation_id }}
+github_app_installation_id: {{ .Values.authSecret.github_app_installation_id | toString | b64enc }}
+{{- end }}
+{{- if .Values.authSecret.github_app_private_key }}
+github_app_private_key: {{ .Values.authSecret.github_app_private_key | toString | b64enc }}
+{{- end }}
+{{- if .Values.authSecret.github_token }}
+github_token: {{ .Values.authSecret.github_token | toString | b64enc }}
 {{- end }}
 {{- end }}
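For illustration only, a minimal sketch and not part of the changeset above: with the reworked template, the chart reads individual authSecret keys instead of iterating over the whole map, so a values file for GitHub App authentication might look like this. All concrete IDs and the key material below are placeholders.

    authSecret:
      create: true
      github_app_id: "12345"                 # placeholder
      github_app_installation_id: "678910"   # placeholder
      github_app_private_key: |
        -----BEGIN RSA PRIVATE KEY-----
        ...                                  # placeholder PEM body
        -----END RSA PRIVATE KEY-----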
@@ -9,7 +9,6 @@ metadata:
 cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "actions-runner-controller.servingCertName" . }}
 webhooks:
 - clientConfig:
-caBundle: Cg==
 service:
 name: {{ include "actions-runner-controller.webhookServiceName" . }}
 namespace: {{ .Release.Namespace }}
@@ -26,8 +25,8 @@ webhooks:
 - UPDATE
 resources:
 - runners
+sideEffects: None
 - clientConfig:
-caBundle: Cg==
 service:
 name: {{ include "actions-runner-controller.webhookServiceName" . }}
 namespace: {{ .Release.Namespace }}
@@ -44,8 +43,8 @@ webhooks:
 - UPDATE
 resources:
 - runnerdeployments
+sideEffects: None
 - clientConfig:
-caBundle: Cg==
 service:
 name: {{ include "actions-runner-controller.webhookServiceName" . }}
 namespace: {{ .Release.Namespace }}
@@ -62,7 +61,7 @@ webhooks:
 - UPDATE
 resources:
 - runnerreplicasets
+sideEffects: None
 ---
 apiVersion: admissionregistration.k8s.io/v1beta1
 kind: ValidatingWebhookConfiguration
@@ -73,7 +72,6 @@ metadata:
 cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "actions-runner-controller.servingCertName" . }}
 webhooks:
 - clientConfig:
-caBundle: Cg==
 service:
 name: {{ include "actions-runner-controller.webhookServiceName" . }}
 namespace: {{ .Release.Namespace }}
@@ -90,8 +88,8 @@ webhooks:
 - UPDATE
 resources:
 - runners
+sideEffects: None
 - clientConfig:
-caBundle: Cg==
 service:
 name: {{ include "actions-runner-controller.webhookServiceName" . }}
 namespace: {{ .Release.Namespace }}
@@ -108,8 +106,8 @@ webhooks:
 - UPDATE
 resources:
 - runnerdeployments
+sideEffects: None
 - clientConfig:
-caBundle: Cg==
 service:
 name: {{ include "actions-runner-controller.webhookServiceName" . }}
 namespace: {{ .Release.Namespace }}
@@ -126,3 +124,4 @@ webhooks:
 - UPDATE
 resources:
 - runnerreplicasets
+sideEffects: None
@@ -8,10 +8,16 @@ replicaCount: 1
 
 syncPeriod: 10m
 
+# The controller tries its best not to repeat the duplicate GitHub API call
+# within this duration.
+# Defaults to syncPeriod - 10s.
+#githubAPICacheDuration: 30s
+
 # Only 1 authentication method can be deployed at a time
 # Uncomment the configuration you are applying and fill in the details
 authSecret:
-enabled: false
+create: true
+name: "controller-manager"
 ### GitHub Apps Configuration
 #github_app_id: ""
 #github_app_installation_id: ""
@@ -21,16 +27,9 @@ authSecret:
 
 image:
 repository: summerwind/actions-runner-controller
-# Overrides the manager image tag whose default is the chart appVersion if the tag key is commented out
-tag: "latest"
 dindSidecarRepositoryAndTag: "docker:dind"
 pullPolicy: IfNotPresent
 
-kube_rbac_proxy:
-image:
-repository: gcr.io/kubebuilder/kube-rbac-proxy
-tag: v0.4.1
-
 imagePullSecrets: []
 nameOverride: ""
 fullnameOverride: ""
@@ -46,10 +45,14 @@ serviceAccount:
 
 podAnnotations: {}
 
-podSecurityContext: {}
+podLabels: {}
+
+podSecurityContext:
+{}
 # fsGroup: 2000
 
-securityContext: {}
+securityContext:
+{}
 # capabilities:
 # drop:
 # - ALL
@@ -61,20 +64,17 @@ service:
 type: ClusterIP
 port: 443
 
-ingress:
-enabled: false
-annotations: {}
-# kubernetes.io/ingress.class: nginx
-# kubernetes.io/tls-acme: "true"
-hosts:
-- host: chart-example.local
-paths: []
-tls: []
-# - secretName: chart-example-tls
-# hosts:
-# - chart-example.local
+metrics:
+serviceMonitor: false
+port: 8443
+proxy:
+enabled: true
+image:
+repository: quay.io/brancz/kube-rbac-proxy
+tag: v0.10.0
 
-resources: {}
+resources:
+{}
 # We usually recommend not to specify default resources and to leave this as a conscious
 # choice for the user. This also increases chances charts run on environments with little
 # resources, such as Minikube. If you do want to specify resources, uncomment the following
@@ -86,13 +86,6 @@ resources: {}
 # cpu: 100m
 # memory: 128Mi
 
-autoscaling:
-enabled: false
-minReplicas: 1
-maxReplicas: 100
-targetCPUUtilizationPercentage: 80
-# targetMemoryUtilizationPercentage: 80
-
 nodeSelector: {}
 
 tolerations: []
@@ -104,7 +97,66 @@ affinity: {}
 # PriorityClass: system-cluster-critical
 priorityClassName: ""
 
-env: {}
+env:
+{}
 # http_proxy: "proxy.com:8080"
 # https_proxy: "proxy.com:8080"
 # no_proxy: ""
 
+scope:
+# If true, the controller will only watch custom resources in a single namespace
+singleNamespace: false
+# If `scope.singleNamespace=true`, the controller will only watch custom resources in this namespace
+# The default value is "", which means the namespace of the controller
+watchNamespace: ""
+
+githubWebhookServer:
+enabled: false
+replicaCount: 1
+secret:
+create: true
+name: "github-webhook-server"
+### GitHub Webhook Configuration
+#github_webhook_secret_token: ""
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+serviceAccount:
+# Specifies whether a service account should be created
+create: true
+# Annotations to add to the service account
+annotations: {}
+# The name of the service account to use.
+# If not set and create is true, a name is generated using the fullname template
+name: ""
+podAnnotations: {}
+podLabels: {}
+podSecurityContext: {}
+# fsGroup: 2000
+securityContext: {}
+resources: {}
+nodeSelector: {}
+tolerations: []
+affinity: {}
+priorityClassName: ""
+service:
+type: ClusterIP
+ports:
+- port: 80
+targetPort: http
+protocol: TCP
+name: http
+#nodePort: someFixedPortForUseWithTerraformCdkCfnEtc
+ingress:
+enabled: false
+annotations:
+{}
+# kubernetes.io/ingress.class: nginx
+# kubernetes.io/tls-acme: "true"
+hosts:
+- host: chart-example.local
+paths: []
+tls: []
+# - secretName: chart-example-tls
+# hosts:
+# - chart-example.local
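For illustration only, a minimal sketch and not part of the changeset: the new githubWebhookServer block could be switched on with a values override along these lines. The hostname and the webhook secret are placeholders; everything else falls back to the chart defaults shown in the diff.

    githubWebhookServer:
      enabled: true
      secret:
        create: true
        github_webhook_secret_token: "replace-with-a-random-string"   # placeholder
      ingress:
        enabled: true
        hosts:
          - host: webhooks.example.com                                # placeholder
            paths:
              - path: /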
cmd/githubwebhookserver/main.go (new file, 191 lines)
@@ -0,0 +1,191 @@
+/*
+Copyright 2021 The actions-runner-controller authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"context"
+	"errors"
+	"flag"
+	"net/http"
+	"os"
+	"sync"
+	"time"
+
+	actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
+	"github.com/summerwind/actions-runner-controller/controllers"
+	zaplib "go.uber.org/zap"
+	"k8s.io/apimachinery/pkg/runtime"
+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	_ "k8s.io/client-go/plugin/pkg/client/auth/exec"
+	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+	_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+	// +kubebuilder:scaffold:imports
+)
+
+var (
+	scheme   = runtime.NewScheme()
+	setupLog = ctrl.Log.WithName("setup")
+)
+
+const (
+	logLevelDebug = "debug"
+	logLevelInfo  = "info"
+	logLevelWarn  = "warn"
+	logLevelError = "error"
+)
+
+func init() {
+	_ = clientgoscheme.AddToScheme(scheme)
+
+	_ = actionsv1alpha1.AddToScheme(scheme)
+	// +kubebuilder:scaffold:scheme
+}
+
+func main() {
+	var (
+		err error
+
+		webhookAddr string
+		metricsAddr string
+
+		// The secret token of the GitHub Webhook. See https://docs.github.com/en/developers/webhooks-and-events/securing-your-webhooks
+		webhookSecretToken string
+
+		watchNamespace string
+
+		enableLeaderElection bool
+		syncPeriod           time.Duration
+		logLevel             string
+	)
+
+	webhookSecretToken = os.Getenv("GITHUB_WEBHOOK_SECRET_TOKEN")
+
+	flag.StringVar(&webhookAddr, "webhook-addr", ":8000", "The address the metric endpoint binds to.")
+	flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
+	flag.StringVar(&watchNamespace, "watch-namespace", "", "The namespace to watch for HorizontalRunnerAutoscaler's to scale on Webhook. Set to empty for letting it watch for all namespaces.")
+	flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
+		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
+	flag.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled. When you use autoscaling, set to a lower value like 10 minute, because this corresponds to the minimum time to react on demand change")
+	flag.StringVar(&logLevel, "log-level", logLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`)
+	flag.Parse()
+
+	if webhookSecretToken == "" {
+		setupLog.Info("-webhook-secret-token is missing or empty. Create one following https://docs.github.com/en/developers/webhooks-and-events/securing-your-webhooks")
+	}
+
+	if watchNamespace == "" {
+		setupLog.Info("-watch-namespace is empty. HorizontalRunnerAutoscalers in all the namespaces are watched, cached, and considered as scale targets.")
+	} else {
+		setupLog.Info("-watch-namespace is %q. Only HorizontalRunnerAutoscalers in %q are watched, cached, and considered as scale targets.")
+	}
+
+	logger := zap.New(func(o *zap.Options) {
+		switch logLevel {
+		case logLevelDebug:
+			o.Development = true
+		case logLevelInfo:
+			lvl := zaplib.NewAtomicLevelAt(zaplib.InfoLevel)
+			o.Level = &lvl
+		case logLevelWarn:
+			lvl := zaplib.NewAtomicLevelAt(zaplib.WarnLevel)
+			o.Level = &lvl
+		case logLevelError:
+			lvl := zaplib.NewAtomicLevelAt(zaplib.ErrorLevel)
+			o.Level = &lvl
+		}
+	})
+
+	ctrl.SetLogger(logger)
+
+	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
+		Scheme:             scheme,
+		SyncPeriod:         &syncPeriod,
+		LeaderElection:     enableLeaderElection,
+		Namespace:          watchNamespace,
+		MetricsBindAddress: metricsAddr,
+		Port:               9443,
+	})
+	if err != nil {
+		setupLog.Error(err, "unable to start manager")
+		os.Exit(1)
+	}
+
+	hraGitHubWebhook := &controllers.HorizontalRunnerAutoscalerGitHubWebhook{
+		Client:         mgr.GetClient(),
+		Log:            ctrl.Log.WithName("controllers").WithName("Runner"),
+		Recorder:       nil,
+		Scheme:         mgr.GetScheme(),
+		SecretKeyBytes: []byte(webhookSecretToken),
+		Namespace:      watchNamespace,
+	}
+
+	if err = hraGitHubWebhook.SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "Runner")
+		os.Exit(1)
+	}
+
+	var wg sync.WaitGroup
+
+	ctx, cancel := context.WithCancel(context.Background())
+
+	wg.Add(1)
+	go func() {
+		defer cancel()
+		defer wg.Done()
+
+		setupLog.Info("starting webhook server")
+		if err := mgr.Start(ctx.Done()); err != nil {
+			setupLog.Error(err, "problem running manager")
+			os.Exit(1)
+		}
+	}()
+
+	mux := http.NewServeMux()
+	mux.HandleFunc("/", hraGitHubWebhook.Handle)
+
+	srv := http.Server{
+		Addr:    webhookAddr,
+		Handler: mux,
+	}
+
+	wg.Add(1)
+	go func() {
+		defer cancel()
+		defer wg.Done()
+
+		go func() {
+			<-ctx.Done()
+
+			srv.Shutdown(context.Background())
+		}()
+
+		if err := srv.ListenAndServe(); err != nil {
+			if !errors.Is(err, http.ErrServerClosed) {
+				setupLog.Error(err, "problem running http server")
+			}
+		}
+	}()
+
+	go func() {
+		<-ctrl.SetupSignalHandler()
+		cancel()
+	}()
+
+	wg.Wait()
+}
@@ -18,6 +18,9 @@ spec:
 - JSONPath: .status.desiredReplicas
 name: Desired
 type: number
+- JSONPath: .status.scheduledOverridesSummary
+name: Schedule
+type: string
 group: actions.summerwind.dev
 names:
 kind: HorizontalRunnerAutoscaler
@@ -48,6 +51,20 @@ spec:
 description: HorizontalRunnerAutoscalerSpec defines the desired state of
 HorizontalRunnerAutoscaler
 properties:
+capacityReservations:
+items:
+description: CapacityReservation specifies the number of replicas
+temporarily added to the scale target until ExpirationTime.
+properties:
+expirationTime:
+format: date-time
+type: string
+name:
+type: string
+replicas:
+type: integer
+type: object
+type: array
 maxReplicas:
 description: MinReplicas is the maximum number of replicas the deployment
 is allowed to scale
@@ -64,6 +81,11 @@ spec:
 items:
 type: string
 type: array
+scaleDownAdjustment:
+description: ScaleDownAdjustment is the number of runners removed
+on scale-down. You can only specify either ScaleDownFactor or
+ScaleDownAdjustment.
+type: integer
 scaleDownFactor:
 description: ScaleDownFactor is the multiplicative factor applied
 to the current number of runners used to determine how many
@@ -73,6 +95,10 @@ spec:
 description: ScaleDownThreshold is the percentage of busy runners
 less than which will trigger the hpa to scale the runners down.
 type: string
+scaleUpAdjustment:
+description: ScaleUpAdjustment is the number of runners added
+on scale-up. You can only specify either ScaleUpFactor or ScaleUpAdjustment.
+type: integer
 scaleUpFactor:
 description: ScaleUpFactor is the multiplicative factor applied
 to the current number of runners used to determine how many
@@ -104,9 +130,129 @@ spec:
 name:
 type: string
 type: object
+scaleUpTriggers:
+description: "ScaleUpTriggers is an experimental feature to increase
+the desired replicas by 1 on each webhook requested received by the
+webhookBasedAutoscaler. \n This feature requires you to also enable
+and deploy the webhookBasedAutoscaler onto your cluster. \n Note that
+the added runners remain until the next sync period at least, and
+they may or may not be used by GitHub Actions depending on the timing.
+They are intended to be used to gain \"resource slack\" immediately
+after you receive a webhook from GitHub, so that you can loosely expect
+MinReplicas runners to be always available."
+items:
+properties:
+amount:
+type: integer
+duration:
+type: string
+githubEvent:
+properties:
+checkRun:
+description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
+properties:
+names:
+description: Names is a list of GitHub Actions glob patterns.
+Any check_run event whose name matches one of patterns
+in the list can trigger autoscaling. Note that check_run
+name seem to equal to the job name you've defined in
+your actions workflow yaml file. So it is very likely
+that you can utilize this to trigger depending on the
+job.
+items:
+type: string
+type: array
+status:
+type: string
+types:
+items:
+type: string
+type: array
+type: object
+pullRequest:
+description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
+properties:
+branches:
+items:
+type: string
+type: array
+types:
+items:
+type: string
+type: array
+type: object
+push:
+description: PushSpec is the condition for triggering scale-up
+on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
+type: object
+type: object
+type: object
+type: array
+scheduledOverrides:
+description: ScheduledOverrides is the list of ScheduledOverride. It
+can be used to override a few fields of HorizontalRunnerAutoscalerSpec
+on schedule. The earlier a scheduled override is, the higher it is
+prioritized.
+items:
+description: ScheduledOverride can be used to override a few fields
+of HorizontalRunnerAutoscalerSpec on schedule. A schedule can optionally
+be recurring, so that the correspoding override happens every day,
+week, month, or year.
+properties:
+endTime:
+description: EndTime is the time at which the first override ends.
+format: date-time
+type: string
+minReplicas:
+description: MinReplicas is the number of runners while overriding.
+If omitted, it doesn't override minReplicas.
+minimum: 0
+nullable: true
+type: integer
+recurrenceRule:
+properties:
+frequency:
+description: Frequency is the name of a predefined interval
+of each recurrence. The valid values are "Daily", "Weekly",
+"Monthly", and "Yearly". If empty, the corresponding override
+happens only once.
+enum:
+- Daily
+- Weekly
+- Monthly
+- Yearly
+type: string
+untilTime:
+description: UntilTime is the time of the final recurrence.
+If empty, the schedule recurs forever.
+format: date-time
+type: string
+type: object
+startTime:
+description: StartTime is the time at which the first override
+starts.
+format: date-time
+type: string
+required:
+- endTime
+- startTime
+type: object
+type: array
 type: object
 status:
 properties:
+cacheEntries:
+items:
+properties:
+expirationTime:
+format: date-time
+type: string
+key:
+type: string
+value:
+type: integer
+type: object
+type: array
 desiredReplicas:
 description: DesiredReplicas is the total number of desired, non-terminated
 and latest pods to be set for the primary RunnerSet This doesn't include
@@ -114,6 +260,7 @@ spec:
 type: integer
 lastSuccessfulScaleOutTime:
 format: date-time
+nullable: true
 type: string
 observedGeneration:
 description: ObservedGeneration is the most recent generation observed
@@ -121,6 +268,11 @@ spec:
 which is updated on mutation by the API Server.
 format: int64
 type: integer
+scheduledOverridesSummary:
+description: ScheduledOverridesSummary is the summary of active and
+upcoming scheduled overrides to be shown in e.g. a column of a `kubectl
+get hra` output for observability.
+type: string
 type: object
 type: object
 version: v1alpha1
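For illustration only, a minimal sketch and not part of the changeset: a HorizontalRunnerAutoscaler using the new scaleUpTriggers and scheduledOverrides fields might look like the example below. The resource names are hypothetical, and spec.scaleTargetRef is assumed from the rest of the project rather than taken from the hunks shown here.

    apiVersion: actions.summerwind.dev/v1alpha1
    kind: HorizontalRunnerAutoscaler
    metadata:
      name: example-hra                      # hypothetical
    spec:
      scaleTargetRef:
        name: example-runnerdeployment       # hypothetical
      minReplicas: 1
      maxReplicas: 10
      scaleUpTriggers:
      - githubEvent:
          checkRun:
            types: ["created"]
            status: "queued"
        amount: 1
        duration: "5m"
      scheduledOverrides:
      - startTime: "2021-05-01T00:00:00+09:00"
        endTime: "2021-05-03T00:00:00+09:00"
        recurrenceRule:
          frequency: Weekly
        minReplicas: 0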
@@ -10,12 +10,18 @@ spec:
 - JSONPath: .spec.replicas
 name: Desired
 type: number
-- JSONPath: .status.availableReplicas
+- JSONPath: .status.replicas
 name: Current
 type: number
-- JSONPath: .status.readyReplicas
-name: Ready
+- JSONPath: .status.updatedReplicas
+name: Up-To-Date
 type: number
+- JSONPath: .status.availableReplicas
+name: Available
+type: number
+- JSONPath: .metadata.creationTimestamp
+name: Age
+type: date
 group: actions.summerwind.dev
 names:
 kind: RunnerDeployment
@@ -38,11 +44,42 @@ spec:
 metadata:
 type: object
 spec:
-description: RunnerReplicaSetSpec defines the desired state of RunnerDeployment
+description: RunnerDeploymentSpec defines the desired state of RunnerDeployment
 properties:
 replicas:
 nullable: true
 type: integer
+selector:
+description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.
+nullable: true
+properties:
+matchExpressions:
+description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+items:
+description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+properties:
+key:
+description: key is the label key that the selector applies to.
+type: string
+operator:
+description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+type: string
+values:
+description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+items:
+type: string
+type: array
+required:
+- key
+- operator
+type: object
+type: array
+matchLabels:
+additionalProperties:
+type: string
+description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+type: object
+type: object
 template:
 properties:
 metadata:
@@ -402,6 +439,38 @@ spec:
 type: array
 dockerEnabled:
 type: boolean
+dockerMTU:
+format: int64
+type: integer
+dockerRegistryMirror:
+type: string
+dockerVolumeMounts:
+items:
+description: VolumeMount describes a mounting of a Volume within a container.
+properties:
+mountPath:
+description: Path within the container at which the volume should be mounted. Must not contain ':'.
+type: string
+mountPropagation:
+description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+type: string
+name:
+description: This must match the Name of a Volume.
+type: string
+readOnly:
+description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+type: boolean
+subPath:
+description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+type: string
+subPathExpr:
+description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
+type: string
+required:
+- mountPath
+- name
+type: object
+type: array
 dockerdContainerResources:
 description: ResourceRequirements describes the compute resource requirements.
 properties:
@@ -426,6 +495,9 @@ spec:
 type: object
 dockerdWithinRunnerContainer:
 type: boolean
+enterprise:
+pattern: ^[^/]+$
+type: string
 env:
 items:
 description: EnvVar represents an environment variable present in a Container.
@@ -534,6 +606,8 @@ spec:
 type: object
 type: object
 type: array
+ephemeral:
+type: boolean
 ephemeralContainers:
 items:
 description: An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.
@@ -543,6 +617,20 @@ spec:
 type: array
 group:
 type: string
+hostAliases:
+items:
+description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+properties:
+hostnames:
+description: Hostnames for the above IP address.
+items:
+type: string
+type: array
+ip:
+description: IP address of the host file entry.
+type: string
+type: object
+type: array
 image:
 type: string
 imagePullPolicy:
@@ -600,6 +688,9 @@ spec:
 description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
 type: object
 type: object
+runtimeClassName:
+description: 'RuntimeClassName is the container runtime configuration that containers should run under. More info: https://kubernetes.io/docs/concepts/containers/runtime-class'
+type: string
 securityContext:
 description: PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.
 properties:
@@ -731,6 +822,12 @@ spec:
 - name
 type: object
 type: array
+volumeSizeLimit:
+anyOf:
+- type: integer
+- type: string
+pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+x-kubernetes-int-or-string: true
 volumes:
 items:
 description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -1543,15 +1640,20 @@ spec:
 status:
 properties:
 availableReplicas:
+description: AvailableReplicas is the total number of available runners which have been successfully registered to GitHub and still running. This corresponds to the sum of status.availableReplicas of all the runner replica sets.
 type: integer
 desiredReplicas:
-description: Replicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
+description: DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
 type: integer
 readyReplicas:
+description: ReadyReplicas is the total number of available runners which have been successfully registered to GitHub and still running. This corresponds to the sum of status.readyReplicas of all the runner replica sets.
+type: integer
+replicas:
+description: Replicas is the total number of replicas
+type: integer
+updatedReplicas:
+description: ReadyReplicas is the total number of available runners which have been successfully registered to GitHub and still running. This corresponds to status.replicas of the runner replica set that has the desired template hash.
 type: integer
-required:
-- availableReplicas
-- readyReplicas
 type: object
 type: object
 version: v1alpha1
@@ -10,12 +10,15 @@ spec:
 - JSONPath: .spec.replicas
 name: Desired
 type: number
-- JSONPath: .status.availableReplicas
+- JSONPath: .status.replicas
 name: Current
 type: number
 - JSONPath: .status.readyReplicas
 name: Ready
 type: number
+- JSONPath: .metadata.creationTimestamp
+name: Age
+type: date
 group: actions.summerwind.dev
 names:
 kind: RunnerReplicaSet
@@ -43,6 +46,37 @@ spec:
 replicas:
 nullable: true
 type: integer
+selector:
+description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.
+nullable: true
+properties:
+matchExpressions:
+description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+items:
+description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+properties:
+key:
+description: key is the label key that the selector applies to.
+type: string
+operator:
+description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+type: string
+values:
+description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+items:
+type: string
+type: array
+required:
+- key
+- operator
+type: object
+type: array
+matchLabels:
+additionalProperties:
+type: string
+description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+type: object
+type: object
 template:
 properties:
 metadata:
@@ -402,6 +436,38 @@ spec:
 type: array
 dockerEnabled:
 type: boolean
+dockerMTU:
+format: int64
+type: integer
+dockerRegistryMirror:
+type: string
+dockerVolumeMounts:
+items:
+description: VolumeMount describes a mounting of a Volume within a container.
+properties:
+mountPath:
+description: Path within the container at which the volume should be mounted. Must not contain ':'.
+type: string
+mountPropagation:
+description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+type: string
+name:
+description: This must match the Name of a Volume.
+type: string
+readOnly:
+description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+type: boolean
+subPath:
+description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+type: string
+subPathExpr:
+description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
+type: string
+required:
+- mountPath
+- name
+type: object
+type: array
 dockerdContainerResources:
 description: ResourceRequirements describes the compute resource requirements.
 properties:
@@ -426,6 +492,9 @@ spec:
 type: object
 dockerdWithinRunnerContainer:
 type: boolean
+enterprise:
+pattern: ^[^/]+$
+type: string
 env:
 items:
 description: EnvVar represents an environment variable present in a Container.
@@ -534,6 +603,8 @@ spec:
 type: object
 type: object
 type: array
+ephemeral:
+type: boolean
 ephemeralContainers:
 items:
 description: An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.
@@ -543,6 +614,20 @@ spec:
 type: array
 group:
 type: string
+hostAliases:
+items:
+description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+properties:
+hostnames:
+description: Hostnames for the above IP address.
+items:
+type: string
+type: array
+ip:
+description: IP address of the host file entry.
+type: string
+type: object
+type: array
 image:
 type: string
 imagePullPolicy:
@@ -600,6 +685,9 @@ spec:
 description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
 type: object
 type: object
+runtimeClassName:
+description: 'RuntimeClassName is the container runtime configuration that containers should run under. More info: https://kubernetes.io/docs/concepts/containers/runtime-class'
+type: string
 securityContext:
 description: PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.
 properties:
@@ -731,6 +819,12 @@ spec:
 - name
 type: object
 type: array
+volumeSizeLimit:
+anyOf:
+- type: integer
+- type: string
+pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+x-kubernetes-int-or-string: true
 volumes:
 items:
 description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -1543,8 +1637,13 @@ spec:
 status:
 properties:
 availableReplicas:
+description: AvailableReplicas is the number of runners that are created and Runnning. This is currently same as ReadyReplicas but perserved for future use.
 type: integer
 readyReplicas:
+description: ReadyReplicas is the number of runners that are created and Runnning.
+type: integer
+replicas:
+description: Replicas is the number of runners that are created and still being managed by this runner replica set.
 type: integer
 required:
 - availableReplicas
@@ -7,6 +7,9 @@ metadata:
  name: runners.actions.summerwind.dev
  spec:
  additionalPrinterColumns:
+ - JSONPath: .spec.enterprise
+ name: Enterprise
+ type: string
  - JSONPath: .spec.organization
  name: Organization
  type: string
@@ -19,6 +22,9 @@ spec:
  - JSONPath: .status.phase
  name: Status
  type: string
+ - JSONPath: .metadata.creationTimestamp
+ name: Age
+ type: date
  group: actions.summerwind.dev
  names:
  kind: Runner
@@ -395,6 +401,38 @@ spec:
  type: array
  dockerEnabled:
  type: boolean
+ dockerMTU:
+ format: int64
+ type: integer
+ dockerRegistryMirror:
+ type: string
+ dockerVolumeMounts:
+ items:
+ description: VolumeMount describes a mounting of a Volume within a container.
+ properties:
+ mountPath:
+ description: Path within the container at which the volume should be mounted. Must not contain ':'.
+ type: string
+ mountPropagation:
+ description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+ type: boolean
+ subPath:
+ description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
  dockerdContainerResources:
  description: ResourceRequirements describes the compute resource requirements.
  properties:
@@ -419,6 +457,9 @@ spec:
  type: object
  dockerdWithinRunnerContainer:
  type: boolean
+ enterprise:
+ pattern: ^[^/]+$
+ type: string
  env:
  items:
  description: EnvVar represents an environment variable present in a Container.
@@ -527,6 +568,8 @@ spec:
  type: object
  type: object
  type: array
+ ephemeral:
+ type: boolean
  ephemeralContainers:
  items:
  description: An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.
@@ -536,6 +579,20 @@ spec:
  type: array
  group:
  type: string
+ hostAliases:
+ items:
+ description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+ properties:
+ hostnames:
+ description: Hostnames for the above IP address.
+ items:
+ type: string
+ type: array
+ ip:
+ description: IP address of the host file entry.
+ type: string
+ type: object
+ type: array
  image:
  type: string
  imagePullPolicy:
@@ -593,6 +650,9 @@ spec:
  description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
  type: object
  type: object
+ runtimeClassName:
+ description: 'RuntimeClassName is the container runtime configuration that containers should run under. More info: https://kubernetes.io/docs/concepts/containers/runtime-class'
+ type: string
  securityContext:
  description: PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.
  properties:
@@ -724,6 +784,12 @@ spec:
  - name
  type: object
  type: array
+ volumeSizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
  volumes:
  items:
  description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -1532,6 +1598,10 @@ spec:
  status:
  description: RunnerStatus defines the observed state of Runner
  properties:
+ lastRegistrationCheckTime:
+ format: date-time
+ nullable: true
+ type: string
  message:
  type: string
  phase:
@@ -1541,6 +1611,8 @@ spec:
  registration:
  description: RunnerStatusRegistration contains runner registration status
  properties:
+ enterprise:
+ type: string
  expiresAt:
  format: date-time
  type: string
@@ -1558,11 +1630,6 @@ spec:
  - expiresAt
  - token
  type: object
- required:
- - message
- - phase
- - reason
- - registration
  type: object
  type: object
  version: v1alpha1
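For orientation, the Runner CRD schema additions above (enterprise, dockerMTU, dockerRegistryMirror, dockerVolumeMounts, ephemeral, hostAliases, runtimeClassName, volumeSizeLimit, plus the new status fields) map onto Go fields in the api/v1alpha1 package. The following is only a sketch inferred from the OpenAPI properties shown in this diff; the exact field names, JSON tags, and pointer-ness in the real RunnerSpec/RunnerStatus types may differ.

// Sketch only: Go API fields inferred from the CRD schema additions above.
// Names and tags are assumptions based on the JSON property names, not the
// verbatim api/v1alpha1 definitions.
package v1alpha1sketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type RunnerSpecAdditions struct {
	// Matches the `enterprise` property (pattern ^[^/]+$).
	Enterprise string `json:"enterprise,omitempty"`
	// Matches `dockerMTU` (format: int64).
	DockerMTU *int64 `json:"dockerMTU,omitempty"`
	// Matches `dockerRegistryMirror`.
	DockerRegistryMirror *string `json:"dockerRegistryMirror,omitempty"`
	// Matches `dockerVolumeMounts` (array of VolumeMount).
	DockerVolumeMounts []corev1.VolumeMount `json:"dockerVolumeMounts,omitempty"`
	// Matches `ephemeral`.
	Ephemeral *bool `json:"ephemeral,omitempty"`
	// Matches `hostAliases`.
	HostAliases []corev1.HostAlias `json:"hostAliases,omitempty"`
	// Matches `runtimeClassName`.
	RuntimeClassName *string `json:"runtimeClassName,omitempty"`
	// Matches `volumeSizeLimit` (int-or-string quantity).
	VolumeSizeLimit *resource.Quantity `json:"volumeSizeLimit,omitempty"`
}

type RunnerStatusAdditions struct {
	// Matches the new nullable `lastRegistrationCheckTime` status field.
	LastRegistrationCheckTime *metav1.Time `json:"lastRegistrationCheckTime,omitempty"`
}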
@@ -10,7 +10,7 @@ spec:
  spec:
  containers:
  - name: kube-rbac-proxy
- image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1
+ image: quay.io/brancz/kube-rbac-proxy:v0.10.0
  args:
  - "--secure-listen-address=0.0.0.0:8443"
  - "--upstream=http://127.0.0.1:8080/"
@@ -4,5 +4,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1
  kind: Kustomization
  images:
  - name: controller
- newName: summerwind/actions-runner-controller
+ newName: mumoshu/actions-runner-controller
- newTag: latest
+ newTag: dev
@@ -24,6 +24,7 @@ webhooks:
  - UPDATE
  resources:
  - runners
+ sideEffects: None
  - clientConfig:
  caBundle: Cg==
  service:
@@ -60,6 +61,7 @@ webhooks:
  - UPDATE
  resources:
  - runnerreplicasets
+ sideEffects: None
 
  ---
  apiVersion: admissionregistration.k8s.io/v1beta1
@@ -86,6 +88,7 @@ webhooks:
  - UPDATE
  resources:
  - runners
+ sideEffects: None
  - clientConfig:
  caBundle: Cg==
  service:
@@ -122,3 +125,4 @@ webhooks:
  - UPDATE
  resources:
  - runnerreplicasets
+ sideEffects: None
@@ -7,8 +7,11 @@ import (
 	"math"
 	"strconv"
 	"strings"
+	"time"
 
 	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
+	kerrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
@@ -19,7 +22,48 @@ const (
 	defaultScaleDownFactor = 0.7
 )
 
-func (r *HorizontalRunnerAutoscalerReconciler) determineDesiredReplicas(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
+func getValueAvailableAt(now time.Time, from, to *time.Time, reservedValue int) *int {
+	if to != nil && now.After(*to) {
+		return nil
+	}
+
+	if from != nil && now.Before(*from) {
+		return nil
+	}
+
+	return &reservedValue
+}
+
+func (r *HorizontalRunnerAutoscalerReconciler) fetchSuggestedReplicasFromCache(hra v1alpha1.HorizontalRunnerAutoscaler) *int {
+	var entry *v1alpha1.CacheEntry
+
+	for i := range hra.Status.CacheEntries {
+		ent := hra.Status.CacheEntries[i]
+
+		if ent.Key != v1alpha1.CacheEntryKeyDesiredReplicas {
+			continue
+		}
+
+		if !time.Now().Before(ent.ExpirationTime.Time) {
+			continue
+		}
+
+		entry = &ent
+
+		break
+	}
+
+	if entry != nil {
+		v := getValueAvailableAt(time.Now(), nil, &entry.ExpirationTime.Time, entry.Value)
+		if v != nil {
+			return v
+		}
+	}
+
+	return nil
+}
+
+func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
 	if hra.Spec.MinReplicas == nil {
 		return nil, fmt.Errorf("horizontalrunnerautoscaler %s/%s is missing minReplicas", hra.Namespace, hra.Name)
 	} else if hra.Spec.MaxReplicas == nil {
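The new getValueAvailableAt helper encodes a simple rule: a reserved value only counts while the current time falls inside its optional [from, to] window, which is how cached desired-replica entries and webhook capacity reservations expire. The standalone Go sketch below mirrors the function body shown in the hunk above and demonstrates the window behaviour; the main function and its sample durations are illustrative only.

// Minimal, self-contained sketch of the reservation-window rule used by
// getValueAvailableAt: a reserved value contributes only while `now` lies
// inside the optional [from, to] window.
package main

import (
	"fmt"
	"time"
)

func getValueAvailableAt(now time.Time, from, to *time.Time, reservedValue int) *int {
	if to != nil && now.After(*to) {
		return nil // reservation already expired
	}
	if from != nil && now.Before(*from) {
		return nil // reservation not active yet
	}
	return &reservedValue
}

func main() {
	now := time.Now()
	expiry := now.Add(10 * time.Minute)
	expired := now.Add(-1 * time.Minute)

	if v := getValueAvailableAt(now, nil, &expiry, 3); v != nil {
		fmt.Println("active reservation contributes:", *v) // 3
	}
	if v := getValueAvailableAt(now, nil, &expired, 3); v == nil {
		fmt.Println("expired reservation contributes nothing")
	}
}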
@@ -27,19 +71,68 @@ func (r *HorizontalRunnerAutoscalerReconciler) determineDesiredReplicas(rd v1alp
 	}
 
 	metrics := hra.Spec.Metrics
-	if len(metrics) == 0 || metrics[0].Type == v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns {
-		return r.calculateReplicasByQueuedAndInProgressWorkflowRuns(rd, hra)
-	} else if metrics[0].Type == v1alpha1.AutoscalingMetricTypePercentageRunnersBusy {
-		return r.calculateReplicasByPercentageRunnersBusy(rd, hra)
-	} else {
-		return nil, fmt.Errorf("validting autoscaling metrics: unsupported metric type %q", metrics[0].Type)
-	}
+	numMetrics := len(metrics)
+	if numMetrics == 0 {
+		if len(hra.Spec.ScaleUpTriggers) == 0 {
+			return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(rd, hra, nil)
 		}
 
-func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByQueuedAndInProgressWorkflowRuns(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
+		return nil, nil
+	} else if numMetrics > 2 {
+		return nil, fmt.Errorf("Too many autoscaling metrics configured: It must be 0 to 2, but got %d", numMetrics)
+	}
+
+	primaryMetric := metrics[0]
+	primaryMetricType := primaryMetric.Type
+
+	var (
+		suggested *int
+		err       error
+	)
+
+	switch primaryMetricType {
+	case v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns:
+		suggested, err = r.suggestReplicasByQueuedAndInProgressWorkflowRuns(rd, hra, &primaryMetric)
+	case v1alpha1.AutoscalingMetricTypePercentageRunnersBusy:
+		suggested, err = r.suggestReplicasByPercentageRunnersBusy(rd, hra, primaryMetric)
+	default:
+		return nil, fmt.Errorf("validting autoscaling metrics: unsupported metric type %q", primaryMetric)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	if suggested != nil && *suggested > 0 {
+		return suggested, nil
+	}
+
+	if len(metrics) == 1 {
+		// This is never supposed to happen but anyway-
+		// Fall-back to `minReplicas + capacityReservedThroughWebhook`.
+		return nil, nil
+	}
+
+	// At this point, we are sure that there are exactly 2 Metrics entries.
+
+	fallbackMetric := metrics[1]
+	fallbackMetricType := fallbackMetric.Type
+
+	if primaryMetricType != v1alpha1.AutoscalingMetricTypePercentageRunnersBusy ||
+		fallbackMetricType != v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns {
+
+		return nil, fmt.Errorf(
+			"invalid HRA Spec: Metrics[0] of %s cannot be combined with Metrics[1] of %s: The only allowed combination is 0=PercentageRunnersBusy and 1=TotalNumberOfQueuedAndInProgressWorkflowRuns",
+			primaryMetricType, fallbackMetricType,
+		)
+	}
+
+	return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(rd, hra, &fallbackMetric)
+}
+
+func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgressWorkflowRuns(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler, metrics *v1alpha1.MetricSpec) (*int, error) {
+
 	var repos [][]string
-	metrics := hra.Spec.Metrics
 	repoID := rd.Spec.Template.Spec.Repository
 	if repoID == "" {
 		orgName := rd.Spec.Template.Spec.Organization
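The hunk above turns the old either/or metric dispatch into a primary/fallback scheme: up to two metrics are allowed, and the only valid pair is PercentageRunnersBusy as metrics[0] with TotalNumberOfQueuedAndInProgressWorkflowRuns as metrics[1]. The sketch below restates that combination rule using plain strings in place of the v1alpha1 constants so it runs standalone; it is not the controller's code, just a distilled version of the validation visible in the diff.

// Sketch of the metric-combination rule enforced above, with string literals
// standing in for the v1alpha1 metric type constants.
package main

import (
	"errors"
	"fmt"
)

const (
	percentageRunnersBusy = "PercentageRunnersBusy"
	queuedAndInProgress   = "TotalNumberOfQueuedAndInProgressWorkflowRuns"
)

// validateMetrics mirrors suggestDesiredReplicas: at most two metrics, and the
// only allowed pair is PercentageRunnersBusy followed by the workflow-run metric.
func validateMetrics(types []string) error {
	switch {
	case len(types) > 2:
		return fmt.Errorf("too many autoscaling metrics: want 0 to 2, got %d", len(types))
	case len(types) == 2 && (types[0] != percentageRunnersBusy || types[1] != queuedAndInProgress):
		return errors.New("metrics[1] may only be TotalNumberOfQueuedAndInProgressWorkflowRuns, as a fallback for PercentageRunnersBusy")
	}
	return nil
}

func main() {
	fmt.Println(validateMetrics([]string{percentageRunnersBusy, queuedAndInProgress})) // <nil>
	fmt.Println(validateMetrics([]string{queuedAndInProgress, percentageRunnersBusy})) // error
}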
@@ -47,11 +140,18 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByQueuedAndInPro
 		return nil, fmt.Errorf("asserting runner deployment spec to detect bug: spec.template.organization should not be empty on this code path")
 		}
 
-		if len(metrics[0].RepositoryNames) == 0 {
+		// In case it's an organizational runners deployment without any scaling metrics defined,
+		// we assume that the desired replicas should always be `minReplicas + capacityReservedThroughWebhook`.
+		// See https://github.com/summerwind/actions-runner-controller/issues/377#issuecomment-793372693
+		if metrics == nil {
+			return nil, nil
+		}
+
+		if len(metrics.RepositoryNames) == 0 {
 			return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].repositoryNames is required and must have one more more entries for organizational runner deployment")
 		}
 
-		for _, repoName := range metrics[0].RepositoryNames {
+		for _, repoName := range metrics.RepositoryNames {
 			repos = append(repos, []string{orgName, repoName})
 		}
 	} else {
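The change above makes the repository list depend on the metric spec passed in: a repository-scoped deployment queries its own repo, an organization-scoped one must list repositoryNames explicitly, and a nil metric (the webhook-driven case) skips the GitHub query entirely. The following sketch restates that branching under simplifying assumptions; in particular, how the real code splits an "owner/name" repository string is not shown in this hunk and is glossed over here.

// Standalone sketch of the repository-resolution step above. The owner/name
// handling for the repository-scoped case is simplified and assumed.
package main

import (
	"errors"
	"fmt"
)

func resolveRepos(repository, organization string, repositoryNames []string) ([][]string, error) {
	if repository != "" {
		// Simplification: treat the configured repository as a single (owner, repo) pair.
		return [][]string{{organization, repository}}, nil
	}
	if len(repositoryNames) == 0 {
		return nil, errors.New("repositoryNames is required for organizational runner deployments")
	}
	var repos [][]string
	for _, name := range repositoryNames {
		repos = append(repos, []string{organization, name})
	}
	return repos, nil
}

func main() {
	repos, err := resolveRepos("", "myorg", []string{"repo-a", "repo-b"})
	fmt.Println(repos, err) // [[myorg repo-a] [myorg repo-b]] <nil>
}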
@@ -96,12 +196,12 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByQueuedAndInPro
 
 	for _, repo := range repos {
 		user, repoName := repo[0], repo[1]
-		list, _, err := r.GitHubClient.Actions.ListRepositoryWorkflowRuns(context.TODO(), user, repoName, nil)
+		workflowRuns, err := r.GitHubClient.ListRepositoryWorkflowRuns(context.TODO(), user, repoName)
 		if err != nil {
 			return nil, err
 		}
 
-		for _, run := range list.WorkflowRuns {
+		for _, run := range workflowRuns {
 			total++
 
 			// In May 2020, there are only 3 statuses.
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
minReplicas := *hra.Spec.MinReplicas
|
|
||||||
maxReplicas := *hra.Spec.MaxReplicas
|
|
||||||
necessaryReplicas := queued + inProgress
|
necessaryReplicas := queued + inProgress
|
||||||
|
|
||||||
var desiredReplicas int
|
|
||||||
|
|
||||||
if necessaryReplicas < minReplicas {
|
|
||||||
desiredReplicas = minReplicas
|
|
||||||
} else if necessaryReplicas > maxReplicas {
|
|
||||||
desiredReplicas = maxReplicas
|
|
||||||
} else {
|
|
||||||
desiredReplicas = necessaryReplicas
|
|
||||||
}
|
|
||||||
|
|
||||||
rd.Status.Replicas = &desiredReplicas
|
|
||||||
replicas := desiredReplicas
|
|
||||||
|
|
||||||
r.Log.V(1).Info(
|
r.Log.V(1).Info(
|
||||||
"Calculated desired replicas",
|
fmt.Sprintf("Suggested desired replicas of %d by TotalNumberOfQueuedAndInProgressWorkflowRuns", necessaryReplicas),
|
||||||
"computed_replicas_desired", desiredReplicas,
|
|
||||||
"spec_replicas_min", minReplicas,
|
|
||||||
"spec_replicas_max", maxReplicas,
|
|
||||||
"workflow_runs_completed", completed,
|
"workflow_runs_completed", completed,
|
||||||
"workflow_runs_in_progress", inProgress,
|
"workflow_runs_in_progress", inProgress,
|
||||||
"workflow_runs_queued", queued,
|
"workflow_runs_queued", queued,
|
||||||
"workflow_runs_unknown", unknown,
|
"workflow_runs_unknown", unknown,
|
||||||
|
"namespace", hra.Namespace,
|
||||||
|
"runner_deployment", rd.Name,
|
||||||
|
"horizontal_runner_autoscaler", hra.Name,
|
||||||
)
|
)
|
||||||
|
|
||||||
return &replicas, nil
|
return &necessaryReplicas, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunnersBusy(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
|
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunnersBusy(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler, metrics v1alpha1.MetricSpec) (*int, error) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
orgName := rd.Spec.Template.Spec.Organization
|
|
||||||
minReplicas := *hra.Spec.MinReplicas
|
|
||||||
maxReplicas := *hra.Spec.MaxReplicas
|
|
||||||
metrics := hra.Spec.Metrics[0]
|
|
||||||
scaleUpThreshold := defaultScaleUpThreshold
|
scaleUpThreshold := defaultScaleUpThreshold
|
||||||
scaleDownThreshold := defaultScaleDownThreshold
|
scaleDownThreshold := defaultScaleDownThreshold
|
||||||
scaleUpFactor := defaultScaleUpFactor
|
scaleUpFactor := defaultScaleUpFactor
|
||||||
@@ -178,14 +259,34 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunn
|
|||||||
|
|
||||||
scaleDownThreshold = sdt
|
scaleDownThreshold = sdt
|
||||||
}
|
}
|
||||||
|
|
||||||
|
scaleUpAdjustment := metrics.ScaleUpAdjustment
|
||||||
|
if scaleUpAdjustment != 0 {
|
||||||
|
if metrics.ScaleUpAdjustment < 0 {
|
||||||
|
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].scaleUpAdjustment cannot be lower than 0")
|
||||||
|
}
|
||||||
|
|
||||||
if metrics.ScaleUpFactor != "" {
|
if metrics.ScaleUpFactor != "" {
|
||||||
|
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[]: scaleUpAdjustment and scaleUpFactor cannot be specified together")
|
||||||
|
}
|
||||||
|
} else if metrics.ScaleUpFactor != "" {
|
||||||
suf, err := strconv.ParseFloat(metrics.ScaleUpFactor, 64)
|
suf, err := strconv.ParseFloat(metrics.ScaleUpFactor, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].scaleUpFactor cannot be parsed into a float64")
|
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].scaleUpFactor cannot be parsed into a float64")
|
||||||
}
|
}
|
||||||
scaleUpFactor = suf
|
scaleUpFactor = suf
|
||||||
}
|
}
|
||||||
|
|
||||||
|
scaleDownAdjustment := metrics.ScaleDownAdjustment
|
||||||
|
if scaleDownAdjustment != 0 {
|
||||||
|
if metrics.ScaleDownAdjustment < 0 {
|
||||||
|
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].scaleDownAdjustment cannot be lower than 0")
|
||||||
|
}
|
||||||
|
|
||||||
if metrics.ScaleDownFactor != "" {
|
if metrics.ScaleDownFactor != "" {
|
||||||
|
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[]: scaleDownAdjustment and scaleDownFactor cannot be specified together")
|
||||||
|
}
|
||||||
|
} else if metrics.ScaleDownFactor != "" {
|
||||||
sdf, err := strconv.ParseFloat(metrics.ScaleDownFactor, 64)
|
sdf, err := strconv.ParseFloat(metrics.ScaleDownFactor, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].scaleDownFactor cannot be parsed into a float64")
|
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].scaleDownFactor cannot be parsed into a float64")
|
||||||
@@ -195,55 +296,114 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunn
|
|||||||
|
|
||||||
// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
|
// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
|
||||||
var runnerList v1alpha1.RunnerList
|
var runnerList v1alpha1.RunnerList
|
||||||
if err := r.List(ctx, &runnerList, client.InNamespace(rd.Namespace)); err != nil {
|
|
||||||
|
var opts []client.ListOption
|
||||||
|
|
||||||
|
opts = append(opts, client.InNamespace(rd.Namespace))
|
||||||
|
|
||||||
|
selector, err := metav1.LabelSelectorAsSelector(getSelector(&rd))
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
opts = append(opts, client.MatchingLabelsSelector{Selector: selector})
|
||||||
|
|
||||||
|
r.Log.V(2).Info("Finding runners with selector", "ns", rd.Namespace)
|
||||||
|
|
||||||
|
if err := r.List(
|
||||||
|
ctx,
|
||||||
|
&runnerList,
|
||||||
|
opts...,
|
||||||
|
); err != nil {
|
||||||
|
if !kerrors.IsNotFound(err) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
runnerMap := make(map[string]struct{})
|
runnerMap := make(map[string]struct{})
|
||||||
for _, items := range runnerList.Items {
|
for _, items := range runnerList.Items {
|
||||||
runnerMap[items.Name] = struct{}{}
|
runnerMap[items.Name] = struct{}{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
enterprise = rd.Spec.Template.Spec.Enterprise
|
||||||
|
organization = rd.Spec.Template.Spec.Organization
|
||||||
|
repository = rd.Spec.Template.Spec.Repository
|
||||||
|
)
|
||||||
|
|
||||||
// ListRunners will return all runners managed by GitHub - not restricted to ns
|
// ListRunners will return all runners managed by GitHub - not restricted to ns
|
||||||
runners, err := r.GitHubClient.ListRunners(ctx, orgName, "")
|
runners, err := r.GitHubClient.ListRunners(
|
||||||
|
ctx,
|
||||||
|
enterprise,
|
||||||
|
organization,
|
||||||
|
repository)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
numRunners := len(runnerList.Items)
|
|
||||||
numRunnersBusy := 0
|
var desiredReplicasBefore int
|
||||||
|
|
||||||
|
if v := rd.Spec.Replicas; v == nil {
|
||||||
|
desiredReplicasBefore = 1
|
||||||
|
} else {
|
||||||
|
desiredReplicasBefore = *v
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
numRunners int
|
||||||
|
numRunnersRegistered int
|
||||||
|
numRunnersBusy int
|
||||||
|
)
|
||||||
|
|
||||||
|
numRunners = len(runnerList.Items)
|
||||||
|
|
||||||
for _, runner := range runners {
|
for _, runner := range runners {
|
||||||
if _, ok := runnerMap[*runner.Name]; ok && runner.GetBusy() {
|
if _, ok := runnerMap[*runner.Name]; ok {
|
||||||
|
numRunnersRegistered++
|
||||||
|
|
||||||
|
if runner.GetBusy() {
|
||||||
numRunnersBusy++
|
numRunnersBusy++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
var desiredReplicas int
|
var desiredReplicas int
|
||||||
fractionBusy := float64(numRunnersBusy) / float64(numRunners)
|
fractionBusy := float64(numRunnersBusy) / float64(desiredReplicasBefore)
|
||||||
if fractionBusy >= scaleUpThreshold {
|
if fractionBusy >= scaleUpThreshold {
|
||||||
desiredReplicas = int(math.Ceil(float64(numRunners) * scaleUpFactor))
|
if scaleUpAdjustment > 0 {
|
||||||
|
desiredReplicas = desiredReplicasBefore + scaleUpAdjustment
|
||||||
|
} else {
|
||||||
|
desiredReplicas = int(math.Ceil(float64(desiredReplicasBefore) * scaleUpFactor))
|
||||||
|
}
|
||||||
} else if fractionBusy < scaleDownThreshold {
|
} else if fractionBusy < scaleDownThreshold {
|
||||||
desiredReplicas = int(float64(numRunners) * scaleDownFactor)
|
if scaleDownAdjustment > 0 {
|
||||||
|
desiredReplicas = desiredReplicasBefore - scaleDownAdjustment
|
||||||
|
} else {
|
||||||
|
desiredReplicas = int(float64(desiredReplicasBefore) * scaleDownFactor)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
desiredReplicas = *rd.Spec.Replicas
|
desiredReplicas = *rd.Spec.Replicas
|
||||||
}
|
}
|
||||||
|
|
||||||
if desiredReplicas < minReplicas {
|
// NOTES for operators:
|
||||||
desiredReplicas = minReplicas
|
//
|
||||||
} else if desiredReplicas > maxReplicas {
|
// - num_runners can be as twice as large as replicas_desired_before while
|
||||||
desiredReplicas = maxReplicas
|
// the runnerdeployment controller is replacing RunnerReplicaSet for runner update.
|
||||||
}
|
|
||||||
|
|
||||||
r.Log.V(1).Info(
|
r.Log.V(1).Info(
|
||||||
"Calculated desired replicas",
|
fmt.Sprintf("Suggested desired replicas of %d by PercentageRunnersBusy", desiredReplicas),
|
||||||
"computed_replicas_desired", desiredReplicas,
|
"replicas_desired_before", desiredReplicasBefore,
|
||||||
"spec_replicas_min", minReplicas,
|
"replicas_desired", desiredReplicas,
|
||||||
"spec_replicas_max", maxReplicas,
|
|
||||||
"current_replicas", rd.Spec.Replicas,
|
|
||||||
"num_runners", numRunners,
|
"num_runners", numRunners,
|
||||||
|
"num_runners_registered", numRunnersRegistered,
|
||||||
"num_runners_busy", numRunnersBusy,
|
"num_runners_busy", numRunnersBusy,
|
||||||
|
"namespace", hra.Namespace,
|
||||||
|
"runner_deployment", rd.Name,
|
||||||
|
"horizontal_runner_autoscaler", hra.Name,
|
||||||
|
"enterprise", enterprise,
|
||||||
|
"organization", organization,
|
||||||
|
"repository", repository,
|
||||||
)
|
)
|
||||||
|
|
||||||
rd.Status.Replicas = &desiredReplicas
|
return &desiredReplicas, nil
|
||||||
replicas := desiredReplicas
|
|
||||||
|
|
||||||
return &replicas, nil
|
|
||||||
}
|
}
|
||||||
|
|||||||
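The core PercentageRunnersBusy decision after this change: the busy fraction is measured against the previously desired replica count (defaulting to 1 when rd.Spec.Replicas is nil), and scaling applies either an absolute adjustment or a multiplicative factor, with min/max clamping handled elsewhere. The sketch below condenses that decision into one standalone function; the threshold and factor values in main are illustrative only (of the defaults, this hunk only shows defaultScaleDownFactor = 0.7).

// Self-contained sketch of the PercentageRunnersBusy suggestion after this
// change: busy fraction against the previous desired count, then adjustment
// or factor.
package main

import (
	"fmt"
	"math"
)

func suggestByPercentageBusy(busy, desiredBefore int, upThreshold, downThreshold, upFactor, downFactor float64, upAdj, downAdj int) int {
	fractionBusy := float64(busy) / float64(desiredBefore)
	switch {
	case fractionBusy >= upThreshold:
		if upAdj > 0 {
			return desiredBefore + upAdj
		}
		return int(math.Ceil(float64(desiredBefore) * upFactor))
	case fractionBusy < downThreshold:
		if downAdj > 0 {
			return desiredBefore - downAdj
		}
		return int(float64(desiredBefore) * downFactor)
	default:
		return desiredBefore
	}
}

func main() {
	// 3 of 4 runners busy, illustrative thresholds 0.7/0.3 and factors 1.3/0.7.
	fmt.Println(suggestByPercentageBusy(3, 4, 0.7, 0.3, 1.3, 0.7, 0, 0)) // scale up: ceil(4*1.3) = 6
	fmt.Println(suggestByPercentageBusy(0, 4, 0.7, 0.3, 1.3, 0.7, 0, 2)) // scale down by adjustment: 2
}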
@@ -47,7 +47,11 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 		min       *int
 		sReplicas *int
 		sTime     *metav1.Time
 
 		workflowRuns string
+		workflowRuns_queued      string
+		workflowRuns_in_progress string
+
 		workflowJobs map[int]string
 		want         int
 		err          string
@@ -59,6 +63,8 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			min:          intPtr(2),
 			max:          intPtr(3),
 			workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
+			workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+			workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
 			want: 3,
 		},
 		// 2 demanded, max at 3, currently 3, delay scaling down due to grace period
@@ -68,7 +74,9 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			max:       intPtr(3),
 			sReplicas: intPtr(3),
 			sTime:     &metav1Now,
-			workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
+			workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
+			workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want: 3,
 		},
 		// 3 demanded, max at 2
@@ -77,6 +85,8 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			min:          intPtr(2),
 			max:          intPtr(2),
 			workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
+			workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+			workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
 			want: 2,
 		},
 		// 2 demanded, min at 2
@@ -85,6 +95,8 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			min:          intPtr(2),
 			max:          intPtr(3),
 			workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
+			workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want: 2,
 		},
 		// 1 demanded, min at 2
@@ -93,6 +105,8 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			min:          intPtr(2),
 			max:          intPtr(3),
 			workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
+			workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+			workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
 			want: 2,
 		},
 		// 1 demanded, min at 2
@@ -101,6 +115,8 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			min:          intPtr(2),
 			max:          intPtr(3),
 			workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
+			workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
+			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want: 2,
 		},
 		// 1 demanded, min at 1
@@ -109,6 +125,8 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			min:          intPtr(1),
 			max:          intPtr(3),
 			workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
+			workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+			workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
 			want: 1,
 		},
 		// 1 demanded, min at 1
@@ -117,6 +135,8 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			min:          intPtr(1),
 			max:          intPtr(3),
 			workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
+			workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
+			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want: 1,
 		},
 		// fixed at 3
@@ -126,6 +146,8 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			max:          intPtr(3),
 			fixed:        intPtr(3),
 			workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
+			workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
+			workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}]}"`,
 			want: 3,
 		},
 
@@ -136,6 +158,8 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			min:          intPtr(2),
 			max:          intPtr(10),
 			workflowRuns: `{"total_count": 4, "workflow_runs":[{"id": 1, "status":"queued"}, {"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
+			workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"id": 1, "status":"queued"}]}"`,
+			workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}]}"`,
 			workflowJobs: map[int]string{
 				1: `{"jobs": [{"status":"queued"}, {"status":"queued"}]}`,
 				2: `{"jobs": [{"status": "in_progress"}, {"status":"completed"}]}`,
@@ -157,7 +181,11 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 	_ = v1alpha1.AddToScheme(scheme)
 
 		t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
-			server := fake.NewServer(fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns), fake.WithListWorkflowJobsResponse(200, tc.workflowJobs))
+			server := fake.NewServer(
+				fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns, tc.workflowRuns_queued, tc.workflowRuns_in_progress),
+				fake.WithListWorkflowJobsResponse(200, tc.workflowJobs),
+				fake.WithListRunnersResponse(200, fake.RunnersListBody),
+			)
 			defer server.Close()
 			client := newGithubClient(server)
@@ -181,7 +209,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 					Replicas: tc.fixed,
 				},
 				Status: v1alpha1.RunnerDeploymentStatus{
-					Replicas: tc.sReplicas,
+					DesiredReplicas: tc.sReplicas,
 				},
 			}
@@ -196,7 +224,12 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 				},
 			}
 
-			got, err := h.computeReplicas(rd, hra)
+			minReplicas, _, _, err := h.getMinReplicas(log, metav1Now.Time, hra)
+			if err != nil {
+				t.Fatalf("unexpected error: %v", err)
+			}
+
+			got, _, _, err := h.computeReplicasWithCache(log, metav1Now.Time, rd, hra, minReplicas)
 			if err != nil {
 				if tc.err == "" {
 					t.Fatalf("unexpected error: expected none, got %v", err)
@@ -206,12 +239,8 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 				return
 			}
 
-			if got == nil {
-				t.Fatalf("unexpected value of rs.Spec.Replicas: nil")
-			}
-
-			if *got != tc.want {
-				t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, *got)
+			if got != tc.want {
+				t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, got)
 			}
 		})
 	}
@@ -231,7 +260,11 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
min *int
|
min *int
|
||||||
sReplicas *int
|
sReplicas *int
|
||||||
sTime *metav1.Time
|
sTime *metav1.Time
|
||||||
|
|
||||||
workflowRuns string
|
workflowRuns string
|
||||||
|
workflowRuns_queued string
|
||||||
|
workflowRuns_in_progress string
|
||||||
|
|
||||||
workflowJobs map[int]string
|
workflowJobs map[int]string
|
||||||
want int
|
want int
|
||||||
err string
|
err string
|
||||||
@@ -243,6 +276,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
min: intPtr(2),
|
min: intPtr(2),
|
||||||
max: intPtr(3),
|
max: intPtr(3),
|
||||||
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
|
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
|
||||||
|
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
|
||||||
|
workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
|
||||||
want: 3,
|
want: 3,
|
||||||
},
|
},
|
||||||
// 2 demanded, max at 3, currently 3, delay scaling down due to grace period
|
// 2 demanded, max at 3, currently 3, delay scaling down due to grace period
|
||||||
@@ -254,6 +289,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
sReplicas: intPtr(3),
|
sReplicas: intPtr(3),
|
||||||
sTime: &metav1Now,
|
sTime: &metav1Now,
|
||||||
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
|
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
|
||||||
|
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
|
||||||
|
workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
|
||||||
want: 3,
|
want: 3,
|
||||||
},
|
},
|
||||||
// 3 demanded, max at 2
|
// 3 demanded, max at 2
|
||||||
@@ -263,6 +300,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
min: intPtr(2),
|
min: intPtr(2),
|
||||||
max: intPtr(2),
|
max: intPtr(2),
|
||||||
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
|
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
|
||||||
|
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
|
||||||
|
workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
|
||||||
want: 2,
|
want: 2,
|
||||||
},
|
},
|
||||||
// 2 demanded, min at 2
|
// 2 demanded, min at 2
|
||||||
@@ -272,6 +311,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
min: intPtr(2),
|
min: intPtr(2),
|
||||||
max: intPtr(3),
|
max: intPtr(3),
|
||||||
workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
|
workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
|
||||||
|
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
|
||||||
|
workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
|
||||||
want: 2,
|
want: 2,
|
||||||
},
|
},
|
||||||
// 1 demanded, min at 2
|
// 1 demanded, min at 2
|
||||||
@@ -281,6 +322,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
min: intPtr(2),
|
min: intPtr(2),
|
||||||
max: intPtr(3),
|
max: intPtr(3),
|
||||||
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
|
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
|
||||||
|
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
|
||||||
|
workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
|
||||||
want: 2,
|
want: 2,
|
||||||
},
|
},
|
||||||
// 1 demanded, min at 2
|
// 1 demanded, min at 2
|
||||||
@@ -290,6 +333,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
min: intPtr(2),
|
min: intPtr(2),
|
||||||
max: intPtr(3),
|
max: intPtr(3),
|
||||||
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
|
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
|
||||||
|
workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
|
||||||
|
workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
|
||||||
want: 2,
|
want: 2,
|
||||||
},
|
},
|
||||||
// 1 demanded, min at 1
|
// 1 demanded, min at 1
|
||||||
@@ -299,6 +344,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
min: intPtr(1),
|
min: intPtr(1),
|
||||||
max: intPtr(3),
|
max: intPtr(3),
|
||||||
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
|
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
|
||||||
|
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
|
||||||
|
workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
|
||||||
want: 1,
|
want: 1,
|
||||||
},
|
},
|
||||||
// 1 demanded, min at 1
|
// 1 demanded, min at 1
|
||||||
@@ -308,6 +355,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
min: intPtr(1),
|
min: intPtr(1),
|
||||||
max: intPtr(3),
|
max: intPtr(3),
|
||||||
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
|
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
|
||||||
|
workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
|
||||||
|
workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
|
||||||
want: 1,
|
want: 1,
|
||||||
},
|
},
|
||||||
// fixed at 3
|
// fixed at 3
|
||||||
@@ -317,7 +366,9 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
fixed: intPtr(1),
|
fixed: intPtr(1),
|
||||||
min: intPtr(1),
|
min: intPtr(1),
|
||||||
max: intPtr(3),
|
max: intPtr(3),
|
||||||
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
|
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
|
||||||
|
workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
|
||||||
|
workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"},{"status":"in_progress"},{"status":"in_progress"}]}"`,
|
||||||
want: 3,
|
want: 3,
|
||||||
},
|
},
|
||||||
// org runner, fixed at 3
|
// org runner, fixed at 3
|
||||||
@@ -327,7 +378,9 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
fixed: intPtr(1),
|
fixed: intPtr(1),
|
||||||
min: intPtr(1),
|
min: intPtr(1),
|
||||||
max: intPtr(3),
|
max: intPtr(3),
|
||||||
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
|
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
|
||||||
|
workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
|
||||||
|
workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"},{"status":"in_progress"},{"status":"in_progress"}]}"`,
|
||||||
want: 3,
|
want: 3,
|
||||||
},
|
},
|
||||||
// org runner, 1 demanded, min at 1, no repos
|
// org runner, 1 demanded, min at 1, no repos
|
||||||
@@ -336,6 +389,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
min: intPtr(1),
|
min: intPtr(1),
|
||||||
max: intPtr(3),
|
max: intPtr(3),
|
||||||
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
|
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
|
||||||
|
workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
|
||||||
|
workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
|
||||||
err: "validating autoscaling metrics: spec.autoscaling.metrics[].repositoryNames is required and must have one more more entries for organizational runner deployment",
|
err: "validating autoscaling metrics: spec.autoscaling.metrics[].repositoryNames is required and must have one more more entries for organizational runner deployment",
|
||||||
},
|
},
|
||||||
|
|
||||||
@@ -347,6 +402,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
min: intPtr(2),
|
min: intPtr(2),
|
||||||
max: intPtr(10),
|
max: intPtr(10),
|
||||||
workflowRuns: `{"total_count": 4, "workflow_runs":[{"id": 1, "status":"queued"}, {"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
|
workflowRuns: `{"total_count": 4, "workflow_runs":[{"id": 1, "status":"queued"}, {"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
|
||||||
|
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"id": 1, "status":"queued"}]}"`,
|
||||||
|
workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
|
||||||
workflowJobs: map[int]string{
|
workflowJobs: map[int]string{
|
||||||
1: `{"jobs": [{"status":"queued"}, {"status":"queued"}]}`,
|
1: `{"jobs": [{"status":"queued"}, {"status":"queued"}]}`,
|
||||||
2: `{"jobs": [{"status": "in_progress"}, {"status":"completed"}]}`,
|
2: `{"jobs": [{"status": "in_progress"}, {"status":"completed"}]}`,
|
||||||
@@ -368,7 +425,13 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
_ = v1alpha1.AddToScheme(scheme)
|
_ = v1alpha1.AddToScheme(scheme)
|
||||||
|
|
||||||
t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
|
t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
|
||||||
server := fake.NewServer(fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns), fake.WithListWorkflowJobsResponse(200, tc.workflowJobs))
|
t.Helper()
|
||||||
|
|
||||||
|
server := fake.NewServer(
|
||||||
|
fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns, tc.workflowRuns_queued, tc.workflowRuns_in_progress),
|
||||||
|
fake.WithListWorkflowJobsResponse(200, tc.workflowJobs),
|
||||||
|
fake.WithListRunnersResponse(200, fake.RunnersListBody),
|
||||||
|
)
|
||||||
defer server.Close()
|
defer server.Close()
|
||||||
client := newGithubClient(server)
|
client := newGithubClient(server)
|
||||||
|
|
||||||
@@ -383,7 +446,17 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
Name: "testrd",
|
Name: "testrd",
|
||||||
},
|
},
|
||||||
Spec: v1alpha1.RunnerDeploymentSpec{
|
Spec: v1alpha1.RunnerDeploymentSpec{
|
||||||
|
Selector: &metav1.LabelSelector{
|
||||||
|
MatchLabels: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
Template: v1alpha1.RunnerTemplate{
|
Template: v1alpha1.RunnerTemplate{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Labels: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
Spec: v1alpha1.RunnerSpec{
|
Spec: v1alpha1.RunnerSpec{
|
||||||
Organization: tc.org,
|
Organization: tc.org,
|
||||||
},
|
},
|
||||||
@@ -391,7 +464,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 				Replicas: tc.fixed,
 			},
 			Status: v1alpha1.RunnerDeploymentStatus{
-				Replicas: tc.sReplicas,
+				DesiredReplicas: tc.sReplicas,
 			},
 		}

@@ -415,7 +488,12 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			},
 		}

-		got, err := h.computeReplicas(rd, hra)
+		minReplicas, _, _, err := h.getMinReplicas(log, metav1Now.Time, hra)
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+
+		got, _, _, err := h.computeReplicasWithCache(log, metav1Now.Time, rd, hra, minReplicas)
 		if err != nil {
 			if tc.err == "" {
 				t.Fatalf("unexpected error: expected none, got %v", err)
@@ -425,12 +503,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 				return
 			}

-		if got == nil {
-			t.Fatalf("unexpected value of rs.Spec.Replicas: nil, wanted %v", tc.want)
-		}
-
-		if *got != tc.want {
-			t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, *got)
+		if got != tc.want {
+			t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, got)
 		}
 	})
 }
controllers/horizontal_runner_autoscaler_webhook.go (new file, 468 lines)
@@ -0,0 +1,468 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2020 The actions-runner-controller authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/types"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||||
|
|
||||||
|
"github.com/go-logr/logr"
|
||||||
|
gogithub "github.com/google/go-github/v33/github"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/client-go/tools/record"
|
||||||
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
|
||||||
|
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
scaleTargetKey = "scaleTarget"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HorizontalRunnerAutoscalerGitHubWebhook autoscales a HorizontalRunnerAutoscaler and the RunnerDeployment on each
|
||||||
|
// GitHub Webhook received
|
||||||
|
type HorizontalRunnerAutoscalerGitHubWebhook struct {
|
||||||
|
client.Client
|
||||||
|
Log logr.Logger
|
||||||
|
Recorder record.EventRecorder
|
||||||
|
Scheme *runtime.Scheme
|
||||||
|
|
||||||
|
// SecretKeyBytes is the byte representation of the Webhook secret token
|
||||||
|
// the administrator is generated and specified in GitHub Web UI.
|
||||||
|
SecretKeyBytes []byte
|
||||||
|
|
||||||
|
// Namespace is the namespace to watch for HorizontalRunnerAutoscaler's to be
|
||||||
|
// scaled on Webhook.
|
||||||
|
// Set to empty for letting it watch for all namespaces.
|
||||||
|
Namespace string
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(request reconcile.Request) (reconcile.Result, error) {
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers,verbs=get;list;watch;create;update;patch;delete
|
||||||
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/finalizers,verbs=get;list;watch;create;update;patch;delete
|
||||||
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/status,verbs=get;update;patch
|
||||||
|
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
||||||
|
|
||||||
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.ResponseWriter, r *http.Request) {
|
||||||
|
var (
|
||||||
|
ok bool
|
||||||
|
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if !ok {
|
||||||
|
w.WriteHeader(http.StatusInternalServerError)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
msg := err.Error()
|
||||||
|
if written, err := w.Write([]byte(msg)); err != nil {
|
||||||
|
autoscaler.Log.Error(err, "failed writing http error response", "msg", msg, "written", written)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if r.Body != nil {
|
||||||
|
r.Body.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// respond ok to GET / e.g. for health check
|
||||||
|
if r.Method == http.MethodGet {
|
||||||
|
fmt.Fprintln(w, "webhook server is running")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var payload []byte
|
||||||
|
|
||||||
|
if len(autoscaler.SecretKeyBytes) > 0 {
|
||||||
|
payload, err = gogithub.ValidatePayload(r, autoscaler.SecretKeyBytes)
|
||||||
|
if err != nil {
|
||||||
|
autoscaler.Log.Error(err, "error validating request body")
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
payload, err = ioutil.ReadAll(r.Body)
|
||||||
|
if err != nil {
|
||||||
|
autoscaler.Log.Error(err, "error reading request body")
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
webhookType := gogithub.WebHookType(r)
|
||||||
|
event, err := gogithub.ParseWebHook(webhookType, payload)
|
||||||
|
if err != nil {
|
||||||
|
var s string
|
||||||
|
if payload != nil {
|
||||||
|
s = string(payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
autoscaler.Log.Error(err, "could not parse webhook", "webhookType", webhookType, "payload", s)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var target *ScaleTarget
|
||||||
|
|
||||||
|
log := autoscaler.Log.WithValues(
|
||||||
|
"event", webhookType,
|
||||||
|
"hookID", r.Header.Get("X-GitHub-Hook-ID"),
|
||||||
|
"delivery", r.Header.Get("X-GitHub-Delivery"),
|
||||||
|
)
|
||||||
|
|
||||||
|
switch e := event.(type) {
|
||||||
|
case *gogithub.PushEvent:
|
||||||
|
target, err = autoscaler.getScaleUpTarget(
|
||||||
|
context.TODO(),
|
||||||
|
log,
|
||||||
|
e.Repo.GetName(),
|
||||||
|
e.Repo.Owner.GetLogin(),
|
||||||
|
e.Repo.Owner.GetType(),
|
||||||
|
autoscaler.MatchPushEvent(e),
|
||||||
|
)
|
||||||
|
case *gogithub.PullRequestEvent:
|
||||||
|
target, err = autoscaler.getScaleUpTarget(
|
||||||
|
context.TODO(),
|
||||||
|
log,
|
||||||
|
e.Repo.GetName(),
|
||||||
|
e.Repo.Owner.GetLogin(),
|
||||||
|
e.Repo.Owner.GetType(),
|
||||||
|
autoscaler.MatchPullRequestEvent(e),
|
||||||
|
)
|
||||||
|
|
||||||
|
if pullRequest := e.PullRequest; pullRequest != nil {
|
||||||
|
log = log.WithValues(
|
||||||
|
"pullRequest.base.ref", e.PullRequest.Base.GetRef(),
|
||||||
|
"action", e.GetAction(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
case *gogithub.CheckRunEvent:
|
||||||
|
target, err = autoscaler.getScaleUpTarget(
|
||||||
|
context.TODO(),
|
||||||
|
log,
|
||||||
|
e.Repo.GetName(),
|
||||||
|
e.Repo.Owner.GetLogin(),
|
||||||
|
e.Repo.Owner.GetType(),
|
||||||
|
autoscaler.MatchCheckRunEvent(e),
|
||||||
|
)
|
||||||
|
|
||||||
|
if checkRun := e.GetCheckRun(); checkRun != nil {
|
||||||
|
log = log.WithValues(
|
||||||
|
"checkRun.status", checkRun.GetStatus(),
|
||||||
|
"action", e.GetAction(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
case *gogithub.PingEvent:
|
||||||
|
ok = true
|
||||||
|
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
|
||||||
|
msg := "pong"
|
||||||
|
|
||||||
|
if written, err := w.Write([]byte(msg)); err != nil {
|
||||||
|
log.Error(err, "failed writing http response", "msg", msg, "written", written)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Info("received ping event")
|
||||||
|
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
log.Info("unknown event type", "eventType", webhookType)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Error(err, "handling check_run event")
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if target == nil {
|
||||||
|
log.Info(
|
||||||
|
"Scale target not found. If this is unexpected, ensure that there is exactly one repository-wide or organizational runner deployment that matches this webhook event",
|
||||||
|
)
|
||||||
|
|
||||||
|
msg := "no horizontalrunnerautoscaler to scale for this github event"
|
||||||
|
|
||||||
|
ok = true
|
||||||
|
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
|
||||||
|
if written, err := w.Write([]byte(msg)); err != nil {
|
||||||
|
log.Error(err, "failed writing http response", "msg", msg, "written", written)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := autoscaler.tryScaleUp(context.TODO(), target); err != nil {
|
||||||
|
log.Error(err, "could not scale up")
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ok = true
|
||||||
|
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
|
||||||
|
msg := fmt.Sprintf("scaled %s by 1", target.Name)
|
||||||
|
|
||||||
|
autoscaler.Log.Info(msg)
|
||||||
|
|
||||||
|
if written, err := w.Write([]byte(msg)); err != nil {
|
||||||
|
log.Error(err, "failed writing http response", "msg", msg, "written", written)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) findHRAsByKey(ctx context.Context, value string) ([]v1alpha1.HorizontalRunnerAutoscaler, error) {
|
||||||
|
ns := autoscaler.Namespace
|
||||||
|
|
||||||
|
var defaultListOpts []client.ListOption
|
||||||
|
|
||||||
|
if ns != "" {
|
||||||
|
defaultListOpts = append(defaultListOpts, client.InNamespace(ns))
|
||||||
|
}
|
||||||
|
|
||||||
|
var hras []v1alpha1.HorizontalRunnerAutoscaler
|
||||||
|
|
||||||
|
if value != "" {
|
||||||
|
opts := append([]client.ListOption{}, defaultListOpts...)
|
||||||
|
opts = append(opts, client.MatchingFields{scaleTargetKey: value})
|
||||||
|
|
||||||
|
if autoscaler.Namespace != "" {
|
||||||
|
opts = append(opts, client.InNamespace(autoscaler.Namespace))
|
||||||
|
}
|
||||||
|
|
||||||
|
var hraList v1alpha1.HorizontalRunnerAutoscalerList
|
||||||
|
|
||||||
|
if err := autoscaler.List(ctx, &hraList, opts...); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, d := range hraList.Items {
|
||||||
|
hras = append(hras, d)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return hras, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func matchTriggerConditionAgainstEvent(types []string, eventAction *string) bool {
|
||||||
|
if len(types) == 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
if eventAction == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tpe := range types {
|
||||||
|
if tpe == *eventAction {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
type ScaleTarget struct {
|
||||||
|
v1alpha1.HorizontalRunnerAutoscaler
|
||||||
|
v1alpha1.ScaleUpTrigger
|
||||||
|
}
|
||||||
|
|
||||||
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) searchScaleTargets(hras []v1alpha1.HorizontalRunnerAutoscaler, f func(v1alpha1.ScaleUpTrigger) bool) []ScaleTarget {
|
||||||
|
var matched []ScaleTarget
|
||||||
|
|
||||||
|
for _, hra := range hras {
|
||||||
|
if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scaleUpTrigger := range hra.Spec.ScaleUpTriggers {
|
||||||
|
if !f(scaleUpTrigger) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
matched = append(matched, ScaleTarget{
|
||||||
|
HorizontalRunnerAutoscaler: hra,
|
||||||
|
ScaleUpTrigger: scaleUpTrigger,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return matched
|
||||||
|
}
|
||||||
|
|
||||||
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleTarget(ctx context.Context, name string, f func(v1alpha1.ScaleUpTrigger) bool) (*ScaleTarget, error) {
|
||||||
|
hras, err := autoscaler.findHRAsByKey(ctx, name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
autoscaler.Log.V(1).Info(fmt.Sprintf("Found %d HRAs by key", len(hras)), "key", name)
|
||||||
|
|
||||||
|
targets := autoscaler.searchScaleTargets(hras, f)
|
||||||
|
|
||||||
|
n := len(targets)
|
||||||
|
|
||||||
|
if n == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if n > 1 {
|
||||||
|
var scaleTargetIDs []string
|
||||||
|
|
||||||
|
for _, t := range targets {
|
||||||
|
scaleTargetIDs = append(scaleTargetIDs, t.HorizontalRunnerAutoscaler.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
autoscaler.Log.Info(
|
||||||
|
"Found too many scale targets: "+
|
||||||
|
"It must be exactly one to avoid ambiguity. "+
|
||||||
|
"Either set Namespace for the webhook-based autoscaler to let it only find HRAs in the namespace, "+
|
||||||
|
"or update Repository or Organization fields in your RunnerDeployment resources to fix the ambiguity.",
|
||||||
|
"scaleTargets", strings.Join(scaleTargetIDs, ","))
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return &targets[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleUpTarget(ctx context.Context, log logr.Logger, repo, owner, ownerType string, f func(v1alpha1.ScaleUpTrigger) bool) (*ScaleTarget, error) {
|
||||||
|
repositoryRunnerKey := owner + "/" + repo
|
||||||
|
|
||||||
|
if target, err := autoscaler.getScaleTarget(ctx, repositoryRunnerKey, f); err != nil {
|
||||||
|
log.Info("finding repository-wide runner", "repository", repositoryRunnerKey)
|
||||||
|
return nil, err
|
||||||
|
} else if target != nil {
|
||||||
|
log.Info("scale up target is repository-wide runners", "repository", repo)
|
||||||
|
return target, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if ownerType == "User" {
|
||||||
|
log.V(1).Info("no repository runner found", "organization", owner)
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if target, err := autoscaler.getScaleTarget(ctx, owner, f); err != nil {
|
||||||
|
log.Info("finding organizational runner", "organization", owner)
|
||||||
|
return nil, err
|
||||||
|
} else if target != nil {
|
||||||
|
log.Info("scale up target is organizational runners", "organization", owner)
|
||||||
|
return target, nil
|
||||||
|
} else {
|
||||||
|
log.V(1).Info("no repository runner or organizational runner found",
|
||||||
|
"repository", repositoryRunnerKey,
|
||||||
|
"organization", owner,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) tryScaleUp(ctx context.Context, target *ScaleTarget) error {
|
||||||
|
if target == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
copy := target.HorizontalRunnerAutoscaler.DeepCopy()
|
||||||
|
|
||||||
|
amount := 1
|
||||||
|
|
||||||
|
if target.ScaleUpTrigger.Amount > 0 {
|
||||||
|
amount = target.ScaleUpTrigger.Amount
|
||||||
|
}
|
||||||
|
|
||||||
|
capacityReservations := getValidCapacityReservations(copy)
|
||||||
|
|
||||||
|
copy.Spec.CapacityReservations = append(capacityReservations, v1alpha1.CapacityReservation{
|
||||||
|
ExpirationTime: metav1.Time{Time: time.Now().Add(target.ScaleUpTrigger.Duration.Duration)},
|
||||||
|
Replicas: amount,
|
||||||
|
})
|
||||||
|
|
||||||
|
if err := autoscaler.Client.Patch(ctx, copy, client.MergeFrom(&target.HorizontalRunnerAutoscaler)); err != nil {
|
||||||
|
return fmt.Errorf("patching horizontalrunnerautoscaler to add capacity reservation: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getValidCapacityReservations(autoscaler *v1alpha1.HorizontalRunnerAutoscaler) []v1alpha1.CapacityReservation {
|
||||||
|
var capacityReservations []v1alpha1.CapacityReservation
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
for _, reservation := range autoscaler.Spec.CapacityReservations {
|
||||||
|
if reservation.ExpirationTime.Time.After(now) {
|
||||||
|
capacityReservations = append(capacityReservations, reservation)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return capacityReservations
|
||||||
|
}
|
||||||
|
|
||||||
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) SetupWithManager(mgr ctrl.Manager) error {
|
||||||
|
name := "webhookbasedautoscaler"
|
||||||
|
if autoscaler.Name != "" {
|
||||||
|
name = autoscaler.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
autoscaler.Recorder = mgr.GetEventRecorderFor(name)
|
||||||
|
|
||||||
|
if err := mgr.GetFieldIndexer().IndexField(&v1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, func(rawObj runtime.Object) []string {
|
||||||
|
hra := rawObj.(*v1alpha1.HorizontalRunnerAutoscaler)
|
||||||
|
|
||||||
|
if hra.Spec.ScaleTargetRef.Name == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var rd v1alpha1.RunnerDeployment
|
||||||
|
|
||||||
|
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return []string{rd.Spec.Template.Spec.Repository, rd.Spec.Template.Spec.Organization}
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctrl.NewControllerManagedBy(mgr).
|
||||||
|
For(&v1alpha1.HorizontalRunnerAutoscaler{}).
|
||||||
|
Named(name).
|
||||||
|
Complete(autoscaler)
|
||||||
|
}
|
||||||
@@ -0,0 +1,43 @@
package controllers

import (
	"github.com/google/go-github/v33/github"
	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
	"github.com/summerwind/actions-runner-controller/pkg/actionsglob"
)

func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchCheckRunEvent(event *github.CheckRunEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
	return func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
		g := scaleUpTrigger.GitHubEvent

		if g == nil {
			return false
		}

		cr := g.CheckRun

		if cr == nil {
			return false
		}

		if !matchTriggerConditionAgainstEvent(cr.Types, event.Action) {
			return false
		}

		if cr.Status != "" && (event.CheckRun == nil || event.CheckRun.Status == nil || *event.CheckRun.Status != cr.Status) {
			return false
		}

		if checkRun := event.CheckRun; checkRun != nil && len(cr.Names) > 0 {
			for _, pat := range cr.Names {
				if r := actionsglob.Match(pat, checkRun.GetName()); r {
					return true
				}
			}

			return false
		}

		return true
	}
}
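The check-run matcher above delegates action filtering to matchTriggerConditionAgainstEvent, defined in the webhook controller earlier in this diff. As a minimal illustrative sketch (not part of the changeset) of that helper's semantics, written as a Go example in the same package:

package controllers

import "fmt"

// ExampleMatchTriggerConditionAgainstEvent illustrates the matching rule used
// by the event matchers: an empty filter list matches any action, while a nil
// event action only matches when no filter is configured.
func ExampleMatchTriggerConditionAgainstEvent() {
	created := "created"

	fmt.Println(matchTriggerConditionAgainstEvent(nil, &created))                 // no filter configured
	fmt.Println(matchTriggerConditionAgainstEvent([]string{"created"}, &created)) // action listed in the filter
	fmt.Println(matchTriggerConditionAgainstEvent([]string{"closed"}, &created))  // action not listed
	fmt.Println(matchTriggerConditionAgainstEvent([]string{"closed"}, nil))       // filter set but event carries no action

	// Output:
	// true
	// true
	// false
	// false
}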
@@ -0,0 +1,32 @@
package controllers

import (
	"github.com/google/go-github/v33/github"
	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
)

func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPullRequestEvent(event *github.PullRequestEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
	return func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
		g := scaleUpTrigger.GitHubEvent

		if g == nil {
			return false
		}

		pr := g.PullRequest

		if pr == nil {
			return false
		}

		if !matchTriggerConditionAgainstEvent(pr.Types, event.Action) {
			return false
		}

		if !matchTriggerConditionAgainstEvent(pr.Branches, event.PullRequest.Base.Ref) {
			return false
		}

		return true
	}
}
controllers/horizontal_runner_autoscaler_webhook_on_push.go (new file, 24 lines)
@@ -0,0 +1,24 @@
package controllers

import (
	"github.com/google/go-github/v33/github"
	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
)

func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPushEvent(event *github.PushEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
	return func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
		g := scaleUpTrigger.GitHubEvent

		if g == nil {
			return false
		}

		push := g.Push

		if push == nil {
			return false
		}

		return true
	}
}
controllers/horizontal_runner_autoscaler_webhook_test.go (new file, 314 lines)
@@ -0,0 +1,314 @@
|
|||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"github.com/go-logr/logr"
|
||||||
|
"github.com/google/go-github/v33/github"
|
||||||
|
actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
sc = runtime.NewScheme()
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
_ = clientgoscheme.AddToScheme(sc)
|
||||||
|
_ = actionsv1alpha1.AddToScheme(sc)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestOrgWebhookCheckRun(t *testing.T) {
|
||||||
|
f, err := os.Open("testdata/org_webhook_check_run_payload.json")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("could not open the fixture: %s", err)
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
var e github.CheckRunEvent
|
||||||
|
if err := json.NewDecoder(f).Decode(&e); err != nil {
|
||||||
|
t.Fatalf("invalid json: %s", err)
|
||||||
|
}
|
||||||
|
testServer(t,
|
||||||
|
"check_run",
|
||||||
|
&e,
|
||||||
|
200,
|
||||||
|
"no horizontalrunnerautoscaler to scale for this github event",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRepoWebhookCheckRun(t *testing.T) {
|
||||||
|
f, err := os.Open("testdata/repo_webhook_check_run_payload.json")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("could not open the fixture: %s", err)
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
var e github.CheckRunEvent
|
||||||
|
if err := json.NewDecoder(f).Decode(&e); err != nil {
|
||||||
|
t.Fatalf("invalid json: %s", err)
|
||||||
|
}
|
||||||
|
testServer(t,
|
||||||
|
"check_run",
|
||||||
|
&e,
|
||||||
|
200,
|
||||||
|
"no horizontalrunnerautoscaler to scale for this github event",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWebhookPullRequest(t *testing.T) {
|
||||||
|
testServer(t,
|
||||||
|
"pull_request",
|
||||||
|
&github.PullRequestEvent{
|
||||||
|
PullRequest: &github.PullRequest{
|
||||||
|
Base: &github.PullRequestBranch{
|
||||||
|
Ref: github.String("main"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Repo: &github.Repository{
|
||||||
|
Name: github.String("myorg/myrepo"),
|
||||||
|
Organization: &github.Organization{
|
||||||
|
Name: github.String("myorg"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Action: github.String("created"),
|
||||||
|
},
|
||||||
|
200,
|
||||||
|
"no horizontalrunnerautoscaler to scale for this github event",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWebhookPush(t *testing.T) {
|
||||||
|
testServer(t,
|
||||||
|
"push",
|
||||||
|
&github.PushEvent{
|
||||||
|
Repo: &github.PushEventRepository{
|
||||||
|
Name: github.String("myrepo"),
|
||||||
|
Organization: github.String("myorg"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
200,
|
||||||
|
"no horizontalrunnerautoscaler to scale for this github event",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWebhookPing(t *testing.T) {
|
||||||
|
testServer(t,
|
||||||
|
"ping",
|
||||||
|
&github.PingEvent{
|
||||||
|
Zen: github.String("zen"),
|
||||||
|
},
|
||||||
|
200,
|
||||||
|
"pong",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetRequest(t *testing.T) {
|
||||||
|
hra := HorizontalRunnerAutoscalerGitHubWebhook{}
|
||||||
|
request, _ := http.NewRequest(http.MethodGet, "/", nil)
|
||||||
|
recorder := httptest.ResponseRecorder{}
|
||||||
|
|
||||||
|
hra.Handle(&recorder, request)
|
||||||
|
response := recorder.Result()
|
||||||
|
|
||||||
|
if response.StatusCode != http.StatusOK {
|
||||||
|
t.Errorf("want %d, got %d", http.StatusOK, response.StatusCode)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetValidCapacityReservations(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
hra := &actionsv1alpha1.HorizontalRunnerAutoscaler{
|
||||||
|
Spec: actionsv1alpha1.HorizontalRunnerAutoscalerSpec{
|
||||||
|
CapacityReservations: []actionsv1alpha1.CapacityReservation{
|
||||||
|
{
|
||||||
|
ExpirationTime: metav1.Time{Time: now.Add(-time.Second)},
|
||||||
|
Replicas: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ExpirationTime: metav1.Time{Time: now},
|
||||||
|
Replicas: 2,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ExpirationTime: metav1.Time{Time: now.Add(time.Second)},
|
||||||
|
Replicas: 3,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
revs := getValidCapacityReservations(hra)
|
||||||
|
|
||||||
|
var count int
|
||||||
|
|
||||||
|
for _, r := range revs {
|
||||||
|
count += r.Replicas
|
||||||
|
}
|
||||||
|
|
||||||
|
want := 3
|
||||||
|
|
||||||
|
if count != want {
|
||||||
|
t.Errorf("want %d, got %d", want, count)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func installTestLogger(webhook *HorizontalRunnerAutoscalerGitHubWebhook) *bytes.Buffer {
|
||||||
|
logs := &bytes.Buffer{}
|
||||||
|
|
||||||
|
log := testLogger{
|
||||||
|
name: "testlog",
|
||||||
|
writer: logs,
|
||||||
|
}
|
||||||
|
|
||||||
|
webhook.Log = &log
|
||||||
|
|
||||||
|
return logs
|
||||||
|
}
|
||||||
|
|
||||||
|
func testServer(t *testing.T, eventType string, event interface{}, wantCode int, wantBody string) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
hraWebhook := &HorizontalRunnerAutoscalerGitHubWebhook{}
|
||||||
|
|
||||||
|
var initObjs []runtime.Object
|
||||||
|
|
||||||
|
client := fake.NewFakeClientWithScheme(sc, initObjs...)
|
||||||
|
|
||||||
|
logs := installTestLogger(hraWebhook)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if t.Failed() {
|
||||||
|
t.Logf("diagnostics: %s", logs.String())
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
hraWebhook.Client = client
|
||||||
|
|
||||||
|
mux := http.NewServeMux()
|
||||||
|
mux.HandleFunc("/", hraWebhook.Handle)
|
||||||
|
|
||||||
|
server := httptest.NewServer(mux)
|
||||||
|
defer server.Close()
|
||||||
|
|
||||||
|
resp, err := sendWebhook(server, eventType, event)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if resp != nil {
|
||||||
|
resp.Body.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if resp.StatusCode != wantCode {
|
||||||
|
t.Error("status:", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
respBody, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if string(respBody) != wantBody {
|
||||||
|
t.Fatal("body:", string(respBody))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func sendWebhook(server *httptest.Server, eventType string, event interface{}) (*http.Response, error) {
|
||||||
|
jsonBuf := &bytes.Buffer{}
|
||||||
|
enc := json.NewEncoder(jsonBuf)
|
||||||
|
enc.SetIndent(" ", "")
|
||||||
|
err := enc.Encode(event)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("[bug in test] encoding event to json: %+v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
reqBody := jsonBuf.Bytes()
|
||||||
|
|
||||||
|
u, err := url.Parse(server.URL)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("parsing server url: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
req := &http.Request{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: u,
|
||||||
|
Header: map[string][]string{
|
||||||
|
"X-GitHub-Event": {eventType},
|
||||||
|
"Content-Type": {"application/json"},
|
||||||
|
},
|
||||||
|
Body: ioutil.NopCloser(bytes.NewBuffer(reqBody)),
|
||||||
|
}
|
||||||
|
|
||||||
|
return http.DefaultClient.Do(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// testLogger is a sample logr.Logger that logs in-memory.
|
||||||
|
// It's only for testing log outputs.
|
||||||
|
type testLogger struct {
|
||||||
|
name string
|
||||||
|
keyValues map[string]interface{}
|
||||||
|
|
||||||
|
writer io.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ logr.Logger = &testLogger{}
|
||||||
|
|
||||||
|
func (l *testLogger) Info(msg string, kvs ...interface{}) {
|
||||||
|
fmt.Fprintf(l.writer, "%s] %s\t", l.name, msg)
|
||||||
|
for k, v := range l.keyValues {
|
||||||
|
fmt.Fprintf(l.writer, "%s=%+v ", k, v)
|
||||||
|
}
|
||||||
|
for i := 0; i < len(kvs); i += 2 {
|
||||||
|
fmt.Fprintf(l.writer, "%s=%+v ", kvs[i], kvs[i+1])
|
||||||
|
}
|
||||||
|
fmt.Fprintf(l.writer, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_ *testLogger) Enabled() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *testLogger) Error(err error, msg string, kvs ...interface{}) {
|
||||||
|
kvs = append(kvs, "error", err)
|
||||||
|
l.Info(msg, kvs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *testLogger) V(_ int) logr.InfoLogger {
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *testLogger) WithName(name string) logr.Logger {
|
||||||
|
return &testLogger{
|
||||||
|
name: l.name + "." + name,
|
||||||
|
keyValues: l.keyValues,
|
||||||
|
writer: l.writer,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *testLogger) WithValues(kvs ...interface{}) logr.Logger {
|
||||||
|
newMap := make(map[string]interface{}, len(l.keyValues)+len(kvs)/2)
|
||||||
|
for k, v := range l.keyValues {
|
||||||
|
newMap[k] = v
|
||||||
|
}
|
||||||
|
for i := 0; i < len(kvs); i += 2 {
|
||||||
|
newMap[kvs[i].(string)] = kvs[i+1]
|
||||||
|
}
|
||||||
|
return &testLogger{
|
||||||
|
name: l.name,
|
||||||
|
keyValues: newMap,
|
||||||
|
writer: l.writer,
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -18,8 +18,12 @@ package controllers
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
|
||||||
"github.com/summerwind/actions-runner-controller/github"
|
"github.com/summerwind/actions-runner-controller/github"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
|
|
||||||
@@ -29,10 +33,10 @@ import (
|
|||||||
ctrl "sigs.k8s.io/controller-runtime"
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
|
||||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
||||||
|
"github.com/summerwind/actions-runner-controller/controllers/metrics"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -46,8 +50,13 @@ type HorizontalRunnerAutoscalerReconciler struct {
|
|||||||
Log logr.Logger
|
Log logr.Logger
|
||||||
Recorder record.EventRecorder
|
Recorder record.EventRecorder
|
||||||
Scheme *runtime.Scheme
|
Scheme *runtime.Scheme
|
||||||
|
|
||||||
|
CacheDuration time.Duration
|
||||||
|
Name string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const defaultReplicas = 1
|
||||||
|
|
||||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnerdeployments,verbs=get;list;watch;update;patch
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnerdeployments,verbs=get;list;watch;update;patch
|
||||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers,verbs=get;list;watch;create;update;patch;delete
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers,verbs=get;list;watch;create;update;patch;delete
|
||||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/finalizers,verbs=get;list;watch;create;update;patch;delete
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/finalizers,verbs=get;list;watch;create;update;patch;delete
|
||||||
@@ -67,6 +76,8 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
|
|||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
metrics.SetHorizontalRunnerAutoscalerSpec(hra.ObjectMeta, hra.Spec)
|
||||||
|
|
||||||
var rd v1alpha1.RunnerDeployment
|
var rd v1alpha1.RunnerDeployment
|
||||||
if err := r.Get(ctx, types.NamespacedName{
|
if err := r.Get(ctx, types.NamespacedName{
|
||||||
Namespace: req.Namespace,
|
Namespace: req.Namespace,
|
||||||
@@ -79,7 +90,16 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
|
|||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
replicas, err := r.computeReplicas(rd, hra)
|
now := time.Now()
|
||||||
|
|
||||||
|
minReplicas, active, upcoming, err := r.getMinReplicas(log, now, hra)
|
||||||
|
if err != nil {
|
||||||
|
log.Error(err, "Could not compute min replicas")
|
||||||
|
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
newDesiredReplicas, computedReplicas, computedReplicasFromCache, err := r.computeReplicasWithCache(log, now, rd, hra, minReplicas)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
|
r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
|
||||||
|
|
||||||
@@ -88,62 +108,234 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
|
|||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
const defaultReplicas = 1
|
|
||||||
|
|
||||||
currentDesiredReplicas := getIntOrDefault(rd.Spec.Replicas, defaultReplicas)
|
currentDesiredReplicas := getIntOrDefault(rd.Spec.Replicas, defaultReplicas)
|
||||||
newDesiredReplicas := getIntOrDefault(replicas, defaultReplicas)
|
|
||||||
|
|
||||||
// Please add more conditions that we can in-place update the newest runnerreplicaset without disruption
|
// Please add more conditions that we can in-place update the newest runnerreplicaset without disruption
|
||||||
if currentDesiredReplicas != newDesiredReplicas {
|
if currentDesiredReplicas != newDesiredReplicas {
|
||||||
copy := rd.DeepCopy()
|
copy := rd.DeepCopy()
|
||||||
copy.Spec.Replicas = &newDesiredReplicas
|
copy.Spec.Replicas = &newDesiredReplicas
|
||||||
|
|
||||||
if err := r.Client.Update(ctx, copy); err != nil {
|
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
|
||||||
log.Error(err, "Failed to update runnerderployment resource")
|
return ctrl.Result{}, fmt.Errorf("patching runnerdeployment to have %d replicas: %w", newDesiredReplicas, err)
|
||||||
|
}
|
||||||
return ctrl.Result{}, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return ctrl.Result{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if hra.Status.DesiredReplicas == nil || *hra.Status.DesiredReplicas != *replicas {
|
|
||||||
updated := hra.DeepCopy()
|
updated := hra.DeepCopy()
|
||||||
|
|
||||||
if (hra.Status.DesiredReplicas == nil && *replicas > 1) ||
|
if hra.Status.DesiredReplicas == nil || *hra.Status.DesiredReplicas != newDesiredReplicas {
|
||||||
(hra.Status.DesiredReplicas != nil && *replicas > *hra.Status.DesiredReplicas) {
|
if (hra.Status.DesiredReplicas == nil && newDesiredReplicas > 1) ||
|
||||||
|
(hra.Status.DesiredReplicas != nil && newDesiredReplicas > *hra.Status.DesiredReplicas) {
|
||||||
|
|
||||||
updated.Status.LastSuccessfulScaleOutTime = &metav1.Time{Time: time.Now()}
|
updated.Status.LastSuccessfulScaleOutTime = &metav1.Time{Time: time.Now()}
|
||||||
}
|
}
|
||||||
|
|
||||||
updated.Status.DesiredReplicas = replicas
|
updated.Status.DesiredReplicas = &newDesiredReplicas
|
||||||
|
}
|
||||||
|
|
||||||
if err := r.Status().Update(ctx, updated); err != nil {
|
if computedReplicasFromCache == nil {
|
||||||
log.Error(err, "Failed to update horizontalrunnerautoscaler status")
|
cacheEntries := getValidCacheEntries(updated, now)
|
||||||
|
|
||||||
return ctrl.Result{}, err
|
var cacheDuration time.Duration
|
||||||
|
|
||||||
|
if r.CacheDuration > 0 {
|
||||||
|
cacheDuration = r.CacheDuration
|
||||||
|
} else {
|
||||||
|
cacheDuration = 10 * time.Minute
|
||||||
|
}
|
||||||
|
|
||||||
|
updated.Status.CacheEntries = append(cacheEntries, v1alpha1.CacheEntry{
|
||||||
|
Key: v1alpha1.CacheEntryKeyDesiredReplicas,
|
||||||
|
Value: computedReplicas,
|
||||||
|
ExpirationTime: metav1.Time{Time: time.Now().Add(cacheDuration)},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
var overridesSummary string
|
||||||
|
|
||||||
|
if (active != nil && upcoming == nil) || (active != nil && upcoming != nil && active.Period.EndTime.Before(upcoming.Period.StartTime)) {
|
||||||
|
after := defaultReplicas
|
||||||
|
if hra.Spec.MinReplicas != nil && *hra.Spec.MinReplicas >= 0 {
|
||||||
|
after = *hra.Spec.MinReplicas
|
||||||
|
}
|
||||||
|
|
||||||
|
overridesSummary = fmt.Sprintf("min=%d time=%s", after, active.Period.EndTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
if active == nil && upcoming != nil || (active != nil && upcoming != nil && active.Period.EndTime.After(upcoming.Period.StartTime)) {
|
||||||
|
if upcoming.ScheduledOverride.MinReplicas != nil {
|
||||||
|
overridesSummary = fmt.Sprintf("min=%d time=%s", *upcoming.ScheduledOverride.MinReplicas, upcoming.Period.StartTime)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if overridesSummary != "" {
|
||||||
|
updated.Status.ScheduledOverridesSummary = &overridesSummary
|
||||||
|
} else {
|
||||||
|
updated.Status.ScheduledOverridesSummary = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(hra.Status, updated.Status) {
|
||||||
|
metrics.SetHorizontalRunnerAutoscalerStatus(updated.ObjectMeta, updated.Status)
|
||||||
|
|
||||||
|
if err := r.Status().Patch(ctx, updated, client.MergeFrom(&hra)); err != nil {
|
||||||
|
return ctrl.Result{}, fmt.Errorf("patching horizontalrunnerautoscaler status: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getValidCacheEntries(hra *v1alpha1.HorizontalRunnerAutoscaler, now time.Time) []v1alpha1.CacheEntry {
|
||||||
|
var cacheEntries []v1alpha1.CacheEntry
|
||||||
|
|
||||||
|
for _, ent := range hra.Status.CacheEntries {
|
||||||
|
if ent.ExpirationTime.After(now) {
|
||||||
|
cacheEntries = append(cacheEntries, ent)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return cacheEntries
|
||||||
|
}
|
||||||
|
|
||||||
func (r *HorizontalRunnerAutoscalerReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
func (r *HorizontalRunnerAutoscalerReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||||
r.Recorder = mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller")
|
name := "horizontalrunnerautoscaler-controller"
|
||||||
|
if r.Name != "" {
|
||||||
|
name = r.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Recorder = mgr.GetEventRecorderFor(name)
|
||||||
|
|
||||||
return ctrl.NewControllerManagedBy(mgr).
|
return ctrl.NewControllerManagedBy(mgr).
|
||||||
For(&v1alpha1.HorizontalRunnerAutoscaler{}).
|
For(&v1alpha1.HorizontalRunnerAutoscaler{}).
|
||||||
|
Named(name).
|
||||||
Complete(r)
|
Complete(r)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *HorizontalRunnerAutoscalerReconciler) computeReplicas(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
|
type Override struct {
|
||||||
var computedReplicas *int
|
ScheduledOverride v1alpha1.ScheduledOverride
|
||||||
|
Period Period
|
||||||
replicas, err := r.determineDesiredReplicas(rd, hra)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *HorizontalRunnerAutoscalerReconciler) matchScheduledOverrides(log logr.Logger, now time.Time, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, *Override, *Override, error) {
|
||||||
|
var minReplicas *int
|
||||||
|
var active, upcoming *Override
|
||||||
|
|
||||||
|
for _, o := range hra.Spec.ScheduledOverrides {
|
||||||
|
log.V(1).Info(
|
||||||
|
"Checking scheduled override",
|
||||||
|
"now", now,
|
||||||
|
"startTime", o.StartTime,
|
||||||
|
"endTime", o.EndTime,
|
||||||
|
"frequency", o.RecurrenceRule.Frequency,
|
||||||
|
"untilTime", o.RecurrenceRule.UntilTime,
|
||||||
|
)
|
||||||
|
|
||||||
|
a, u, err := MatchSchedule(
|
||||||
|
now, o.StartTime.Time, o.EndTime.Time,
|
||||||
|
RecurrenceRule{
|
||||||
|
Frequency: o.RecurrenceRule.Frequency,
|
||||||
|
UntilTime: o.RecurrenceRule.UntilTime.Time,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return minReplicas, nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use the first when there are two or more active scheduled overrides,
|
||||||
|
// as the spec defines that the earlier scheduled override is prioritized higher than later ones.
|
||||||
|
if a != nil && active == nil {
|
||||||
|
active = &Override{Period: *a, ScheduledOverride: o}
|
||||||
|
|
||||||
|
if o.MinReplicas != nil {
|
||||||
|
minReplicas = o.MinReplicas
|
||||||
|
|
||||||
|
log.V(1).Info(
|
||||||
|
"Found active scheduled override",
|
||||||
|
"activeStartTime", a.StartTime,
|
||||||
|
"activeEndTime", a.EndTime,
|
||||||
|
"activeMinReplicas", minReplicas,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if u != nil && (upcoming == nil || u.StartTime.Before(upcoming.Period.StartTime)) {
|
||||||
|
upcoming = &Override{Period: *u, ScheduledOverride: o}
|
||||||
|
|
||||||
|
log.V(1).Info(
|
||||||
|
"Found upcoming scheduled override",
|
||||||
|
"upcomingStartTime", u.StartTime,
|
||||||
|
"upcomingEndTime", u.EndTime,
|
||||||
|
"upcomingMinReplicas", o.MinReplicas,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return minReplicas, active, upcoming, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *HorizontalRunnerAutoscalerReconciler) getMinReplicas(log logr.Logger, now time.Time, hra v1alpha1.HorizontalRunnerAutoscaler) (int, *Override, *Override, error) {
|
||||||
|
minReplicas := defaultReplicas
|
||||||
|
if hra.Spec.MinReplicas != nil && *hra.Spec.MinReplicas >= 0 {
|
||||||
|
minReplicas = *hra.Spec.MinReplicas
|
||||||
|
}
|
||||||
|
|
||||||
|
m, active, upcoming, err := r.matchScheduledOverrides(log, now, hra)
|
||||||
|
if err != nil {
|
||||||
|
return 0, nil, nil, err
|
||||||
|
} else if m != nil {
|
||||||
|
minReplicas = *m
|
||||||
|
}
|
||||||
|
|
||||||
|
return minReplicas, active, upcoming, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(log logr.Logger, now time.Time, rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler, minReplicas int) (int, int, *int, error) {
|
||||||
|
var suggestedReplicas int
|
||||||
|
|
||||||
|
suggestedReplicasFromCache := r.fetchSuggestedReplicasFromCache(hra)
|
||||||
|
|
||||||
|
var cached *int
|
||||||
|
|
||||||
|
if suggestedReplicasFromCache != nil {
|
||||||
|
cached = suggestedReplicasFromCache
|
||||||
|
|
||||||
|
if cached == nil {
|
||||||
|
suggestedReplicas = minReplicas
|
||||||
|
} else {
|
||||||
|
suggestedReplicas = *cached
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
v, err := r.suggestDesiredReplicas(rd, hra)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if v == nil {
|
||||||
|
suggestedReplicas = minReplicas
|
||||||
|
} else {
|
||||||
|
suggestedReplicas = *v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var reserved int
|
||||||
|
|
||||||
|
for _, reservation := range hra.Spec.CapacityReservations {
|
||||||
|
if reservation.ExpirationTime.Time.After(now) {
|
||||||
|
reserved += reservation.Replicas
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
newDesiredReplicas := suggestedReplicas + reserved
|
||||||
|
|
||||||
|
if newDesiredReplicas < minReplicas {
|
||||||
|
newDesiredReplicas = minReplicas
|
||||||
|
} else if hra.Spec.MaxReplicas != nil && newDesiredReplicas > *hra.Spec.MaxReplicas {
|
||||||
|
newDesiredReplicas = *hra.Spec.MaxReplicas
|
||||||
|
}
|
||||||
|
|
||||||
|
//
|
||||||
|
// Delay scaling-down for ScaleDownDelaySecondsAfterScaleUp or DefaultScaleDownDelay
|
||||||
|
//
|
||||||
|
|
||||||
var scaleDownDelay time.Duration
|
var scaleDownDelay time.Duration
|
||||||
|
|
||||||
if hra.Spec.ScaleDownDelaySecondsAfterScaleUp != nil {
|
if hra.Spec.ScaleDownDelaySecondsAfterScaleUp != nil {
|
||||||
@@ -152,17 +344,50 @@ func (r *HorizontalRunnerAutoscalerReconciler) computeReplicas(rd v1alpha1.Runne
|
|||||||
scaleDownDelay = DefaultScaleDownDelay
|
scaleDownDelay = DefaultScaleDownDelay
|
||||||
}
|
}
|
||||||
|
|
||||||
now := time.Now()
|
var scaleDownDelayUntil *time.Time
|
||||||
|
|
||||||
if hra.Status.DesiredReplicas == nil ||
|
if hra.Status.DesiredReplicas == nil ||
|
||||||
*hra.Status.DesiredReplicas < *replicas ||
|
*hra.Status.DesiredReplicas < newDesiredReplicas ||
|
||||||
hra.Status.LastSuccessfulScaleOutTime == nil ||
|
hra.Status.LastSuccessfulScaleOutTime == nil {
|
||||||
hra.Status.LastSuccessfulScaleOutTime.Add(scaleDownDelay).Before(now) {
|
|
||||||
|
|
||||||
computedReplicas = replicas
|
} else if hra.Status.LastSuccessfulScaleOutTime != nil {
|
||||||
|
t := hra.Status.LastSuccessfulScaleOutTime.Add(scaleDownDelay)
|
||||||
|
|
||||||
|
// ScaleDownDelay is not passed
|
||||||
|
if t.After(now) {
|
||||||
|
scaleDownDelayUntil = &t
|
||||||
|
newDesiredReplicas = *hra.Status.DesiredReplicas
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
computedReplicas = hra.Status.DesiredReplicas
|
newDesiredReplicas = *hra.Status.DesiredReplicas
|
||||||
}
|
}
|
||||||
|
|
||||||
return computedReplicas, nil
|
//
|
||||||
|
// Logs various numbers for monitoring and debugging purpose
|
||||||
|
//
|
||||||
|
|
||||||
|
kvs := []interface{}{
|
||||||
|
"suggested", suggestedReplicas,
|
||||||
|
"reserved", reserved,
|
||||||
|
"min", minReplicas,
|
||||||
|
}
|
||||||
|
|
||||||
|
if cached != nil {
|
||||||
|
kvs = append(kvs, "cached", *cached)
|
||||||
|
}
|
||||||
|
|
||||||
|
if scaleDownDelayUntil != nil {
|
||||||
|
kvs = append(kvs, "last_scale_up_time", *hra.Status.LastSuccessfulScaleOutTime)
|
||||||
|
kvs = append(kvs, "scale_down_delay_until", scaleDownDelayUntil)
|
||||||
|
}
|
||||||
|
|
||||||
|
if maxReplicas := hra.Spec.MaxReplicas; maxReplicas != nil {
|
||||||
|
kvs = append(kvs, "max", *maxReplicas)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.V(1).Info(fmt.Sprintf("Calculated desired replicas of %d", newDesiredReplicas),
|
||||||
|
kvs...,
|
||||||
|
)
|
||||||
|
|
||||||
|
return newDesiredReplicas, suggestedReplicas, suggestedReplicasFromCache, nil
|
||||||
}
|
}
|
||||||
|
|||||||
controllers/horizontalrunnerautoscaler_controller_test.go (new file, 49 lines)
@@ -0,0 +1,49 @@
package controllers

import (
	"github.com/google/go-cmp/cmp"
	actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"testing"
	"time"
)

func TestGetValidCacheEntries(t *testing.T) {
	now := time.Now()

	hra := &actionsv1alpha1.HorizontalRunnerAutoscaler{
		Status: actionsv1alpha1.HorizontalRunnerAutoscalerStatus{
			CacheEntries: []actionsv1alpha1.CacheEntry{
				{
					Key:            "foo",
					Value:          1,
					ExpirationTime: metav1.Time{Time: now.Add(-time.Second)},
				},
				{
					Key:            "foo",
					Value:          2,
					ExpirationTime: metav1.Time{Time: now},
				},
				{
					Key:            "foo",
					Value:          3,
					ExpirationTime: metav1.Time{Time: now.Add(time.Second)},
				},
			},
		},
	}

	revs := getValidCacheEntries(hra, now)

	counts := map[string]int{}

	for _, r := range revs {
		counts[r.Key] += r.Value
	}

	want := map[string]int{"foo": 3}

	if d := cmp.Diff(want, counts); d != "" {
		t.Errorf("%s", d)
	}
}
(One file's diff is suppressed because it is too large.)
controllers/metrics/horizontalrunnerautoscaler.go (new file, 67 lines)
@@ -0,0 +1,67 @@
package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	hraName      = "horizontalrunnerautoscaler"
	hraNamespace = "namespace"
)

var (
	horizontalRunnerAutoscalerMetrics = []prometheus.Collector{
		horizontalRunnerAutoscalerMinReplicas,
		horizontalRunnerAutoscalerMaxReplicas,
		horizontalRunnerAutoscalerDesiredReplicas,
	}
)

var (
	horizontalRunnerAutoscalerMinReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "horizontalrunnerautoscaler_spec_min_replicas",
			Help: "minReplicas of HorizontalRunnerAutoscaler",
		},
		[]string{hraName, hraNamespace},
	)
	horizontalRunnerAutoscalerMaxReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "horizontalrunnerautoscaler_spec_max_replicas",
			Help: "maxReplicas of HorizontalRunnerAutoscaler",
		},
		[]string{hraName, hraNamespace},
	)
	horizontalRunnerAutoscalerDesiredReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "horizontalrunnerautoscaler_status_desired_replicas",
			Help: "desiredReplicas of HorizontalRunnerAutoscaler",
		},
		[]string{hraName, hraNamespace},
	)
)

func SetHorizontalRunnerAutoscalerSpec(o metav1.ObjectMeta, spec v1alpha1.HorizontalRunnerAutoscalerSpec) {
	labels := prometheus.Labels{
		hraName:      o.Name,
		hraNamespace: o.Namespace,
	}
	if spec.MaxReplicas != nil {
		horizontalRunnerAutoscalerMaxReplicas.With(labels).Set(float64(*spec.MaxReplicas))
	}
	if spec.MinReplicas != nil {
		horizontalRunnerAutoscalerMinReplicas.With(labels).Set(float64(*spec.MinReplicas))
	}
}

func SetHorizontalRunnerAutoscalerStatus(o metav1.ObjectMeta, status v1alpha1.HorizontalRunnerAutoscalerStatus) {
	labels := prometheus.Labels{
		hraName:      o.Name,
		hraNamespace: o.Namespace,
	}
	if status.DesiredReplicas != nil {
		horizontalRunnerAutoscalerDesiredReplicas.With(labels).Set(float64(*status.DesiredReplicas))
	}
}
controllers/metrics/metrics.go (new file, 14 lines)
@@ -0,0 +1,14 @@
// Package metrics provides the metrics of custom resources such as HRA.
//
// This depends on the metrics exporter of kubebuilder.
// See https://book.kubebuilder.io/reference/metrics.html for details.
package metrics

import (
	"sigs.k8s.io/controller-runtime/pkg/metrics"
)

func init() {
	metrics.Registry.MustRegister(runnerDeploymentMetrics...)
	metrics.Registry.MustRegister(horizontalRunnerAutoscalerMetrics...)
}
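The init function above registers both collector sets with controller-runtime's shared registry, so the gauges are served from the manager's metrics endpoint once the package is imported. In this changeset the controllers package imports it directly for the Set* helpers; a hypothetical binary that only wanted the registration side effect could rely on a blank import (a sketch, not part of the diff):

package main

import (
	// Blank import: runs the package's init(), which calls
	// metrics.Registry.MustRegister for the collectors defined above.
	_ "github.com/summerwind/actions-runner-controller/controllers/metrics"
)

func main() {}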
controllers/metrics/runnerdeployment.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
)

const (
	rdName      = "runnerdeployment"
	rdNamespace = "namespace"
)

var (
	runnerDeploymentMetrics = []prometheus.Collector{
		runnerDeploymentReplicas,
	}
)

var (
	runnerDeploymentReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "runnerdeployment_spec_replicas",
			Help: "replicas of RunnerDeployment",
		},
		[]string{rdName, rdNamespace},
	)
)

func SetRunnerDeployment(rd v1alpha1.RunnerDeployment) {
	labels := prometheus.Labels{
		rdName:      rd.Name,
		rdNamespace: rd.Namespace,
	}
	if rd.Spec.Replicas != nil {
		runnerDeploymentReplicas.With(labels).Set(float64(*rd.Spec.Replicas))
	}
}
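Taken together, the three gauge families are kept up to date through the exported setters. The following is a hypothetical call site (not part of the diff; the helper name is invented for illustration) showing how a reconciler that has already fetched both objects could publish them:

package controllers

import (
	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
	"github.com/summerwind/actions-runner-controller/controllers/metrics"
)

// publishAutoscalingMetrics is a hypothetical helper: it forwards the fetched
// RunnerDeployment and HorizontalRunnerAutoscaler to the metric setters
// defined in the new controllers/metrics package.
func publishAutoscalingMetrics(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) {
	// runnerdeployment_spec_replicas
	metrics.SetRunnerDeployment(rd)

	// horizontalrunnerautoscaler_spec_min_replicas / _max_replicas
	metrics.SetHorizontalRunnerAutoscalerSpec(hra.ObjectMeta, hra.Spec)

	// horizontalrunnerautoscaler_status_desired_replicas
	metrics.SetHorizontalRunnerAutoscalerStatus(hra.ObjectMeta, hra.Status)
}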
@@ -18,12 +18,17 @@ package controllers

 import (
 	"context"
+	"errors"
 	"fmt"
-	"github.com/summerwind/actions-runner-controller/hash"
 	"strings"
+	"time"
+
+	gogithub "github.com/google/go-github/v33/github"
+	"github.com/summerwind/actions-runner-controller/hash"
+	"k8s.io/apimachinery/pkg/util/wait"

 	"github.com/go-logr/logr"
-	"k8s.io/apimachinery/pkg/api/errors"
+	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/tools/record"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -41,6 +46,11 @@ const (
 	finalizerName = "runner.actions.summerwind.dev"

 	LabelKeyPodTemplateHash = "pod-template-hash"
+
+	retryDelayOnGitHubAPIRateLimitError = 30 * time.Second
+
+	// This is an annotation internal to actions-runner-controller and can change in backward-incompatible ways
+	annotationKeyRegistrationOnly = "actions-runner-controller/registration-only"
 )

 // RunnerReconciler reconciles a Runner object
@@ -52,6 +62,9 @@ type RunnerReconciler struct {
 	GitHubClient *github.Client
 	RunnerImage  string
 	DockerImage  string
+	Name         string
+	RegistrationRecheckInterval time.Duration
+	RegistrationRecheckJitter   time.Duration
 }

 // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners,verbs=get;list;watch;create;update;patch;delete
@@ -95,9 +108,22 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {

 	if removed {
 		if len(runner.Status.Registration.Token) > 0 {
-			ok, err := r.unregisterRunner(ctx, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
+			ok, err := r.unregisterRunner(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
 			if err != nil {
-				log.Error(err, "Failed to unregister runner")
+				if errors.Is(err, &gogithub.RateLimitError{}) {
+					// We log the underlying error when we failed calling GitHub API to list or unregisters,
+					// or the runner is still busy.
+					log.Error(
+						err,
+						fmt.Sprintf(
+							"Failed to unregister runner due to GitHub API rate limits. Delaying retry for %s to avoid excessive GitHub API calls",
+							retryDelayOnGitHubAPIRateLimitError,
+						),
+					)
+
+					return ctrl.Result{RequeueAfter: retryDelayOnGitHubAPIRateLimitError}, err
+				}
+
 				return ctrl.Result{}, err
 			}
@@ -111,8 +137,8 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
 		newRunner := runner.DeepCopy()
 		newRunner.ObjectMeta.Finalizers = finalizers

-		if err := r.Update(ctx, newRunner); err != nil {
-			log.Error(err, "Failed to update runner")
+		if err := r.Patch(ctx, newRunner, client.MergeFrom(&runner)); err != nil {
+			log.Error(err, "Failed to update runner for finalizer removal")
 			return ctrl.Result{}, err
 		}
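A small self-contained sketch of the rate-limit handling pattern introduced above, extracted for readability: a go-github RateLimitError is detected and mapped to a fixed requeue delay, while anything else is passed through unchanged. The helper name below is hypothetical; the controller inlines this logic.

	package main

	import (
		"errors"
		"fmt"
		"time"

		gogithub "github.com/google/go-github/v33/github"
	)

	const retryDelayOnRateLimit = 30 * time.Second

	// requeueDelayFor is a hypothetical helper mirroring the controller's inline checks:
	// rate-limit errors get a fixed backoff, other errors get no special delay.
	func requeueDelayFor(err error) time.Duration {
		var rateLimit *gogithub.RateLimitError
		if errors.As(err, &rateLimit) {
			return retryDelayOnRateLimit
		}
		return 0
	}

	func main() {
		err := &gogithub.RateLimitError{Message: "API rate limit exceeded"}
		fmt.Printf("requeue after %s\n", requeueDelayFor(err))
	}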
@@ -122,9 +148,37 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
 		return ctrl.Result{}, nil
 	}

+	registrationOnly := metav1.HasAnnotation(runner.ObjectMeta, annotationKeyRegistrationOnly)
+	if registrationOnly && runner.Status.Phase != "" {
+		// At this point we are sure that the registration-only runner has successfully configured and
+		// is of `offline` status, because we set runner.Status.Phase to that of the runner pod only after
+		// successful registration.
+
+		var pod corev1.Pod
+		if err := r.Get(ctx, req.NamespacedName, &pod); err != nil {
+			if !kerrors.IsNotFound(err) {
+				log.Info(fmt.Sprintf("Retrying soon as we failed to get registration-only runner pod: %v", err))
+
+				return ctrl.Result{Requeue: true}, nil
+			}
+		} else if err := r.Delete(ctx, &pod); err != nil {
+			if !kerrors.IsNotFound(err) {
+				log.Info(fmt.Sprintf("Retrying soon as we failed to delete registration-only runner pod: %v", err))
+
+				return ctrl.Result{Requeue: true}, nil
+			}
+		}
+
+		log.Info("Successfully deleted registration-only runner pod to free node and cluster resource")
+
+		// Return here to not recreate the deleted pod, because recreating it is a waste of cluster and node resources,
+		// and also defeats the original purpose of scale-from/to-zero we're trying to implement by using the registration-only runner.
+		return ctrl.Result{}, nil
+	}
+
 	var pod corev1.Pod
 	if err := r.Get(ctx, req.NamespacedName, &pod); err != nil {
-		if !errors.IsNotFound(err) {
+		if !kerrors.IsNotFound(err) {
 			return ctrl.Result{}, err
 		}
@@ -141,35 +195,66 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
 		}

 		if err := r.Create(ctx, &newPod); err != nil {
+			if kerrors.IsAlreadyExists(err) {
+				// Gracefully handle pod-already-exists errors due to informer cache delay.
+				// Without this we got a few errors like the below on new runner pod:
+				//   2021-03-16T00:23:10.116Z ERROR controller-runtime.controller Reconciler error {"controller": "runner-controller", "request": "default/example-runnerdeploy-b2g2g-j4mcp", "error": "pods \"example-runnerdeploy-b2g2g-j4mcp\" already exists"}
+				log.Info(
+					"Failed to create pod due to AlreadyExists error. Probably this pod has been already created in a previous reconciliation but is still not in the informer cache. Will retry on pod created. If it doesn't repeat, there's no problem",
+				)
+
+				return ctrl.Result{}, nil
+			}
+
 			log.Error(err, "Failed to create pod resource")

 			return ctrl.Result{}, err
 		}

 		r.Recorder.Event(&runner, corev1.EventTypeNormal, "PodCreated", fmt.Sprintf("Created pod '%s'", newPod.Name))
 		log.Info("Created runner pod", "repository", runner.Spec.Repository)
 	} else {
+		if !pod.ObjectMeta.DeletionTimestamp.IsZero() {
+			deletionTimeout := 1 * time.Minute
+			currentTime := time.Now()
+			deletionDidTimeout := currentTime.Sub(pod.DeletionTimestamp.Add(deletionTimeout)) > 0
+
+			if deletionDidTimeout {
+				log.Info(
+					fmt.Sprintf("Failed to delete pod within %s. ", deletionTimeout)+
+						"This is typically the case when a Kubernetes node became unreachable "+
+						"and the kube controller started evicting nodes. Forcefully deleting the pod to not get stuck.",
+					"podDeletionTimestamp", pod.DeletionTimestamp,
+					"currentTime", currentTime,
+					"configuredDeletionTimeout", deletionTimeout,
+				)
+
+				var force int64 = 0
+				// forcefully delete runner as we would otherwise get stuck if the node stays unreachable
+				if err := r.Delete(ctx, &pod, &client.DeleteOptions{GracePeriodSeconds: &force}); err != nil {
+					// probably
+					if !kerrors.IsNotFound(err) {
+						log.Error(err, "Failed to forcefully delete pod resource ...")
+						return ctrl.Result{}, err
+					}
+					// forceful deletion finally succeeded
+					return ctrl.Result{Requeue: true}, nil
+				}
+
+				r.Recorder.Event(&runner, corev1.EventTypeNormal, "PodDeleted", fmt.Sprintf("Forcefully deleted pod '%s'", pod.Name))
+				log.Info("Forcefully deleted runner pod", "repository", runner.Spec.Repository)
+				// give kube manager a little time to forcefully delete the stuck pod
+				return ctrl.Result{RequeueAfter: 3 * time.Second}, err
+			} else {
+				return ctrl.Result{}, err
+			}
+		}
+
 		// If pod has ended up succeeded we need to restart it
 		// Happens e.g. when dind is in runner and run completes
-		restart := pod.Status.Phase == corev1.PodSucceeded
+		stopped := pod.Status.Phase == corev1.PodSucceeded

-		if !restart && runner.Status.Phase != string(pod.Status.Phase) {
-			updated := runner.DeepCopy()
-			updated.Status.Phase = string(pod.Status.Phase)
-			updated.Status.Reason = pod.Status.Reason
-			updated.Status.Message = pod.Status.Message
-
-			if err := r.Status().Update(ctx, updated); err != nil {
-				log.Error(err, "Failed to update runner status")
-				return ctrl.Result{}, err
-			}
-
-			return ctrl.Result{}, nil
-		}
-
-		if !pod.ObjectMeta.DeletionTimestamp.IsZero() {
-			return ctrl.Result{}, err
-		}
-
+		if !stopped {
 			if pod.Status.Phase == corev1.PodRunning {
 				for _, status := range pod.Status.ContainerStatuses {
 					if status.Name != containerName {
@@ -177,10 +262,22 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
 					}

 					if status.State.Terminated != nil && status.State.Terminated.ExitCode == 0 {
-						restart = true
+						stopped = true
 					}
 				}
 			}
+		}
+
+		restart := stopped
+
+		if registrationOnly && stopped {
+			restart = false
+
+			log.Info(
+				"Observed that registration-only runner for scaling-from-zero has successfully stopped. " +
+					"Unlike other pods, this one will be recreated only when runner spec changes.",
+			)
+		}

 		if updated, err := r.updateRegistrationToken(ctx, runner); err != nil {
 			return ctrl.Result{}, err
@@ -194,10 +291,90 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
 			return ctrl.Result{}, err
 		}

-		runnerBusy, err := r.isRunnerBusy(ctx, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
+		if registrationOnly {
+			newPod.Spec.Containers[0].Env = append(
+				newPod.Spec.Containers[0].Env,
+				corev1.EnvVar{
+					Name:  "RUNNER_REGISTRATION_ONLY",
+					Value: "true",
+				},
+			)
+		}
+
+		var registrationRecheckDelay time.Duration
+
+		// all checks done below only decide whether a restart is needed
+		// if a restart was already decided before, there is no need for the checks
+		// saving API calls and scary log messages
+		if !restart {
+			registrationCheckInterval := time.Minute
+			if r.RegistrationRecheckInterval > 0 {
+				registrationCheckInterval = r.RegistrationRecheckInterval
+			}
+
+			// We want to call ListRunners GitHub Actions API only once per runner per minute.
+			// This if block, in conjunction with:
+			//   return ctrl.Result{RequeueAfter: registrationRecheckDelay}, nil
+			// achieves that.
+			if lastCheckTime := runner.Status.LastRegistrationCheckTime; lastCheckTime != nil {
+				nextCheckTime := lastCheckTime.Add(registrationCheckInterval)
+				now := time.Now()
+
+				// Requeue scheduled by RequeueAfter can happen a bit earlier (like dozens of milliseconds)
+				// so to avoid excessive, ineffective retry, we heuristically ignore the remaining delay in case it is
+				// shorter than 1s
+				requeueAfter := nextCheckTime.Sub(now) - time.Second
+				if requeueAfter > 0 {
+					log.Info(
+						fmt.Sprintf("Skipped registration check because it's deferred until %s. Retrying in %s at latest", nextCheckTime, requeueAfter),
+						"lastRegistrationCheckTime", lastCheckTime,
+						"registrationCheckInterval", registrationCheckInterval,
+					)
+
+					// Without RequeueAfter, the controller may not retry on scheduled. Instead, it must wait until the
+					// next sync period passes, which can be too much later than nextCheckTime.
+					//
+					// We need to requeue on this reconciliation even though we have already scheduled the initial
+					// requeue previously with `return ctrl.Result{RequeueAfter: registrationRecheckDelay}, nil`.
+					// Apparently, the workqueue used by controller-runtime seems to deduplicate and reset the delay on
+					// other requeues, so the initial scheduled requeue may have been reset due to requeue on
+					// spec/status change.
+					return ctrl.Result{RequeueAfter: requeueAfter}, nil
+				}
+			}
+
+			notFound := false
+			offline := false
+
+			runnerBusy, err := r.GitHubClient.IsRunnerBusy(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
+
+			currentTime := time.Now()
+
 			if err != nil {
-				log.Error(err, "Failed to check if runner is busy")
-				return ctrl.Result{}, nil
+				var notFoundException *github.RunnerNotFound
+				var offlineException *github.RunnerOffline
+				if errors.As(err, &notFoundException) {
+					notFound = true
+				} else if errors.As(err, &offlineException) {
+					offline = true
+				} else {
+					var e *gogithub.RateLimitError
+					if errors.As(err, &e) {
+						// We log the underlying error when we failed calling GitHub API to list or unregisters,
+						// or the runner is still busy.
+						log.Error(
+							err,
+							fmt.Sprintf(
+								"Failed to check if runner is busy due to GitHub API rate limit. Retrying in %s to avoid excessive GitHub API calls",
+								retryDelayOnGitHubAPIRateLimitError,
+							),
+						)
+
+						return ctrl.Result{RequeueAfter: retryDelayOnGitHubAPIRateLimitError}, err
+					}
+
+					return ctrl.Result{}, err
+				}
 			}

 			// See the `newPod` function called above for more information
@@ -209,11 +386,107 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
 				restart = true
 			}

+			registrationTimeout := 10 * time.Minute
+			durationAfterRegistrationTimeout := currentTime.Sub(pod.CreationTimestamp.Add(registrationTimeout))
+			registrationDidTimeout := durationAfterRegistrationTimeout > 0
+
+			if notFound {
+				if registrationDidTimeout {
+					log.Info(
+						"Runner failed to register itself to GitHub in timely manner. "+
+							"Recreating the pod to see if it resolves the issue. "+
+							"CAUTION: If you see this a lot, you should investigate the root cause. "+
+							"See https://github.com/summerwind/actions-runner-controller/issues/288",
+						"podCreationTimestamp", pod.CreationTimestamp,
+						"currentTime", currentTime,
+						"configuredRegistrationTimeout", registrationTimeout,
+					)
+
+					restart = true
+				} else {
+					log.V(1).Info(
+						"Runner pod exists but we failed to check if runner is busy. Apparently it still needs more time.",
+						"runnerName", runner.Name,
+					)
+				}
+			} else if offline {
+				if registrationOnly {
+					log.Info(
+						"Observed that registration-only runner for scaling-from-zero has successfully been registered.",
+						"podCreationTimestamp", pod.CreationTimestamp,
+						"currentTime", currentTime,
+						"configuredRegistrationTimeout", registrationTimeout,
+					)
+				} else if registrationDidTimeout {
+					log.Info(
+						"Already existing GitHub runner still appears offline. "+
+							"Recreating the pod to see if it resolves the issue. "+
+							"CAUTION: If you see this a lot, you should investigate the root cause. ",
+						"podCreationTimestamp", pod.CreationTimestamp,
+						"currentTime", currentTime,
+						"configuredRegistrationTimeout", registrationTimeout,
+					)
+
+					restart = true
+				} else {
+					log.V(1).Info(
+						"Runner pod exists but the GitHub runner appears to be still offline. Waiting for runner to get online ...",
+						"runnerName", runner.Name,
+					)
+				}
+			}
+
+			if (notFound || (offline && !registrationOnly)) && !registrationDidTimeout {
+				registrationRecheckJitter := 10 * time.Second
+				if r.RegistrationRecheckJitter > 0 {
+					registrationRecheckJitter = r.RegistrationRecheckJitter
+				}
+
+				registrationRecheckDelay = registrationCheckInterval + wait.Jitter(registrationRecheckJitter, 0.1)
+			}
+		}
+
 		// Don't do anything if there's no need to restart the runner
 		if !restart {
+			// This guard enables us to update runner.Status.Phase to `Running` only after
+			// the runner is registered to GitHub.
+			if registrationRecheckDelay > 0 {
+				log.V(1).Info(fmt.Sprintf("Rechecking the runner registration in %s", registrationRecheckDelay))
+
+				updated := runner.DeepCopy()
+				updated.Status.LastRegistrationCheckTime = &metav1.Time{Time: time.Now()}
+
+				if err := r.Status().Patch(ctx, updated, client.MergeFrom(&runner)); err != nil {
+					log.Error(err, "Failed to update runner status for LastRegistrationCheckTime")
 					return ctrl.Result{}, err
 				}
+
+				return ctrl.Result{RequeueAfter: registrationRecheckDelay}, nil
+			}
+
+			if runner.Status.Phase != string(pod.Status.Phase) {
+				if pod.Status.Phase == corev1.PodRunning {
+					// Seeing this message, you can expect the runner to become `Running` soon.
+					log.Info(
+						"Runner appears to have registered and running.",
+						"podCreationTimestamp", pod.CreationTimestamp,
+					)
+				}
+
+				updated := runner.DeepCopy()
+				updated.Status.Phase = string(pod.Status.Phase)
+				updated.Status.Reason = pod.Status.Reason
+				updated.Status.Message = pod.Status.Message
+
+				if err := r.Status().Patch(ctx, updated, client.MergeFrom(&runner)); err != nil {
+					log.Error(err, "Failed to update runner status for Phase/Reason/Message")
+					return ctrl.Result{}, err
+				}
+			}
+
+			return ctrl.Result{}, nil
+		}

 		// Delete current pod if recreation is needed
 		if err := r.Delete(ctx, &pod); err != nil {
 			log.Error(err, "Failed to delete pod resource")
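To make the scheduling in the block above concrete: the next registration re-check is the configured interval plus a jittered offset, so that many runners do not hit the GitHub API in lockstep. A tiny runnable sketch with illustrative values (the defaults the controller falls back to when nothing is configured):

	package main

	import (
		"fmt"
		"time"

		"k8s.io/apimachinery/pkg/util/wait"
	)

	func main() {
		registrationCheckInterval := time.Minute      // default when RegistrationRecheckInterval is unset
		registrationRecheckJitter := 10 * time.Second // default when RegistrationRecheckJitter is unset

		// wait.Jitter returns the duration plus a random extra of up to maxFactor (10% here),
		// so the delay lands somewhere between interval+jitter and interval+1.1*jitter.
		registrationRecheckDelay := registrationCheckInterval + wait.Jitter(registrationRecheckJitter, 0.1)
		fmt.Printf("next registration check in %s\n", registrationRecheckDelay)
	}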
@@ -227,23 +500,8 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
 	return ctrl.Result{}, nil
 }

-func (r *RunnerReconciler) isRunnerBusy(ctx context.Context, org, repo, name string) (bool, error) {
-	runners, err := r.GitHubClient.ListRunners(ctx, org, repo)
-	if err != nil {
-		return false, err
-	}
-
-	for _, runner := range runners {
-		if runner.GetName() == name {
-			return runner.GetBusy(), nil
-		}
-	}
-
-	return false, fmt.Errorf("runner not found")
-}
-
-func (r *RunnerReconciler) unregisterRunner(ctx context.Context, org, repo, name string) (bool, error) {
-	runners, err := r.GitHubClient.ListRunners(ctx, org, repo)
+func (r *RunnerReconciler) unregisterRunner(ctx context.Context, enterprise, org, repo, name string) (bool, error) {
+	runners, err := r.GitHubClient.ListRunners(ctx, enterprise, org, repo)
 	if err != nil {
 		return false, err
 	}
@@ -263,7 +521,7 @@ func (r *RunnerReconciler) unregisterRunner(ctx context.Context, org, repo, name
 		return false, nil
 	}

-	if err := r.GitHubClient.RemoveRunner(ctx, org, repo, id); err != nil {
+	if err := r.GitHubClient.RemoveRunner(ctx, enterprise, org, repo, id); err != nil {
 		return false, err
 	}

@@ -277,7 +535,7 @@ func (r *RunnerReconciler) updateRegistrationToken(ctx context.Context, runner v

 	log := r.Log.WithValues("runner", runner.Name)

-	rt, err := r.GitHubClient.GetRegistrationToken(ctx, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
+	rt, err := r.GitHubClient.GetRegistrationToken(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
 	if err != nil {
 		r.Recorder.Event(&runner, corev1.EventTypeWarning, "FailedUpdateRegistrationToken", "Updating registration token failed")
 		log.Error(err, "Failed to get new registration token")
@@ -293,8 +551,8 @@ func (r *RunnerReconciler) updateRegistrationToken(ctx context.Context, runner v
 		ExpiresAt: metav1.NewTime(rt.GetExpiresAt().Time),
 	}

-	if err := r.Status().Update(ctx, updated); err != nil {
-		log.Error(err, "Failed to update runner status")
+	if err := r.Status().Patch(ctx, updated, client.MergeFrom(&runner)); err != nil {
+		log.Error(err, "Failed to update runner status for Registration")
 		return false, err
 	}
@@ -309,6 +567,8 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 		privileged      bool = true
 		dockerdInRunner bool = runner.Spec.DockerdWithinRunnerContainer != nil && *runner.Spec.DockerdWithinRunnerContainer
 		dockerEnabled   bool = runner.Spec.DockerEnabled == nil || *runner.Spec.DockerEnabled
+		ephemeral       bool = runner.Spec.Ephemeral == nil || *runner.Spec.Ephemeral
+		dockerdInRunnerPrivileged bool = dockerdInRunner
 	)

 	runnerImage := runner.Spec.Image
@@ -339,6 +599,10 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 			Name:  "RUNNER_REPO",
 			Value: runner.Spec.Repository,
 		},
+		{
+			Name:  "RUNNER_ENTERPRISE",
+			Value: runner.Spec.Enterprise,
+		},
 		{
 			Name:  "RUNNER_LABELS",
 			Value: strings.Join(runner.Spec.Labels, ","),
@@ -363,6 +627,18 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 			Name:  "RUNNER_WORKDIR",
 			Value: workDir,
 		},
+		{
+			Name:  "RUNNER_EPHEMERAL",
+			Value: fmt.Sprintf("%v", ephemeral),
+		},
+	}
+
+	if metav1.HasAnnotation(runner.ObjectMeta, annotationKeyRegistrationOnly) {
+		env = append(env, corev1.EnvVar{
+			Name:  "RUNNER_REGISTRATION_ONLY",
+			Value: "true",
+		},
+		)
 	}

 	env = append(env, runner.Spec.Env...)
@@ -399,6 +675,15 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 		r.GitHubClient.GithubBaseURL,
 	)

+	var seLinuxOptions *corev1.SELinuxOptions
+	if runner.Spec.SecurityContext != nil {
+		seLinuxOptions = runner.Spec.SecurityContext.SELinuxOptions
+		if seLinuxOptions != nil {
+			privileged = false
+			dockerdInRunnerPrivileged = false
+		}
+	}
+
 	pod := corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: runner.Name,
@@ -417,7 +702,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 				EnvFrom: runner.Spec.EnvFrom,
 				SecurityContext: &corev1.SecurityContext{
 					// Runner need to run privileged if it contains DinD
-					Privileged: runner.Spec.DockerdWithinRunnerContainer,
+					Privileged: &dockerdInRunnerPrivileged,
 				},
 				Resources: runner.Spec.Resources,
 			},
@@ -425,45 +710,81 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 		},
 	}

-	if !dockerdInRunner && dockerEnabled {
+	if mtu := runner.Spec.DockerMTU; mtu != nil && dockerdInRunner {
+		pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, []corev1.EnvVar{
+			{
+				Name:  "MTU",
+				Value: fmt.Sprintf("%d", *runner.Spec.DockerMTU),
+			},
+		}...)
+	}
+
+	if mirror := runner.Spec.DockerRegistryMirror; mirror != nil && dockerdInRunner {
+		pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, []corev1.EnvVar{
+			{
+				Name:  "DOCKER_REGISTRY_MIRROR",
+				Value: *runner.Spec.DockerRegistryMirror,
+			},
+		}...)
+	}
+
+	//
+	// /runner must be generated on runtime from /runnertmp embedded in the container image.
+	//
+	// When you're NOT using dindWithinRunner=true,
+	// it must also be shared with the dind container as it seems like required to run docker steps.
+	//
+
 	runnerVolumeName := "runner"
 	runnerVolumeMountPath := "/runner"
+	runnerVolumeEmptyDir := &corev1.EmptyDirVolumeSource{}

-	pod.Spec.Volumes = []corev1.Volume{
-		{
+	if runner.Spec.VolumeSizeLimit != nil {
+		runnerVolumeEmptyDir.SizeLimit = runner.Spec.VolumeSizeLimit
+	}
+
+	pod.Spec.Volumes = append(pod.Spec.Volumes,
+		corev1.Volume{
+			Name: runnerVolumeName,
+			VolumeSource: corev1.VolumeSource{
+				EmptyDir: runnerVolumeEmptyDir,
+			},
+		},
+	)
+
+	pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
+		corev1.VolumeMount{
+			Name:      runnerVolumeName,
+			MountPath: runnerVolumeMountPath,
+		},
+	)
+
+	if !dockerdInRunner && dockerEnabled {
+		pod.Spec.Volumes = append(pod.Spec.Volumes,
+			corev1.Volume{
 				Name: "work",
 				VolumeSource: corev1.VolumeSource{
 					EmptyDir: &corev1.EmptyDirVolumeSource{},
 				},
 			},
-			{
-				Name: runnerVolumeName,
-				VolumeSource: corev1.VolumeSource{
-					EmptyDir: &corev1.EmptyDirVolumeSource{},
-				},
-			},
-			{
+			corev1.Volume{
 				Name: "certs-client",
 				VolumeSource: corev1.VolumeSource{
 					EmptyDir: &corev1.EmptyDirVolumeSource{},
 				},
 			},
-		}
-		pod.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{
-			{
+		)
+		pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
+			corev1.VolumeMount{
 				Name:      "work",
 				MountPath: workDir,
 			},
-			{
-				Name:      runnerVolumeName,
-				MountPath: runnerVolumeMountPath,
-			},
-			{
+			corev1.VolumeMount{
 				Name:      "certs-client",
 				MountPath: "/certs/client",
 				ReadOnly:  true,
 			},
-		}
+		)
 		pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, []corev1.EnvVar{
 			{
 				Name: "DOCKER_HOST",
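The runner volume handling above boils down to one pattern: build an EmptyDirVolumeSource once, optionally cap its size, and append both the volume and its mount. A standalone sketch of that pattern (the size value is illustrative):

	package main

	import (
		"fmt"

		corev1 "k8s.io/api/core/v1"
		"k8s.io/apimachinery/pkg/api/resource"
	)

	func main() {
		// Analogous to runner.Spec.VolumeSizeLimit in the controller; nil means "no limit".
		limit := resource.MustParse("10Gi")

		runnerVolumeEmptyDir := &corev1.EmptyDirVolumeSource{SizeLimit: &limit}

		volume := corev1.Volume{
			Name:         "runner",
			VolumeSource: corev1.VolumeSource{EmptyDir: runnerVolumeEmptyDir},
		}
		mount := corev1.VolumeMount{Name: "runner", MountPath: "/runner"}

		fmt.Println(volume.Name, volume.EmptyDir.SizeLimit.String(), mount.MountPath)
	}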
@@ -478,10 +799,10 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 				Value: "/certs/client",
 			},
 		}...)
-		pod.Spec.Containers = append(pod.Spec.Containers, corev1.Container{
-			Name:  "docker",
-			Image: r.DockerImage,
-			VolumeMounts: []corev1.VolumeMount{
+		// Determine the volume mounts assigned to the docker sidecar. In case extra mounts are included in the RunnerSpec, append them to the standard
+		// set of mounts. See https://github.com/summerwind/actions-runner-controller/issues/435 for context.
+		dockerVolumeMounts := []corev1.VolumeMount{
 			{
 				Name:      "work",
 				MountPath: workDir,
@@ -494,7 +815,15 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 				Name:      "certs-client",
 				MountPath: "/certs/client",
 			},
-		},
+		}
+		if extraDockerVolumeMounts := runner.Spec.DockerVolumeMounts; extraDockerVolumeMounts != nil {
+			dockerVolumeMounts = append(dockerVolumeMounts, extraDockerVolumeMounts...)
+		}
+
+		pod.Spec.Containers = append(pod.Spec.Containers, corev1.Container{
+			Name:         "docker",
+			Image:        r.DockerImage,
+			VolumeMounts: dockerVolumeMounts,
 			Env: []corev1.EnvVar{
 				{
 					Name: "DOCKER_TLS_CERTDIR",
@@ -503,9 +832,31 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 			},
 			SecurityContext: &corev1.SecurityContext{
 				Privileged: &privileged,
+				SELinuxOptions: seLinuxOptions,
 			},
+			Resources: runner.Spec.DockerdContainerResources,
 		})
+
+		if mtu := runner.Spec.DockerMTU; mtu != nil {
+			pod.Spec.Containers[1].Env = append(pod.Spec.Containers[1].Env, []corev1.EnvVar{
+				// See https://docs.docker.com/engine/security/rootless/
+				{
+					Name:  "DOCKERD_ROOTLESS_ROOTLESSKIT_MTU",
+					Value: fmt.Sprintf("%d", *runner.Spec.DockerMTU),
+				},
+			}...)
+
+			pod.Spec.Containers[1].Args = append(pod.Spec.Containers[1].Args,
+				"--mtu",
+				fmt.Sprintf("%d", *runner.Spec.DockerMTU),
+			)
+		}
+
+		if mirror := runner.Spec.DockerRegistryMirror; mirror != nil {
+			pod.Spec.Containers[1].Args = append(pod.Spec.Containers[1].Args,
+				fmt.Sprintf("--registry-mirror=%s", *runner.Spec.DockerRegistryMirror),
+			)
+		}
 	}

 	if len(runner.Spec.Containers) != 0 {
@@ -566,6 +917,14 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 		pod.Spec.TerminationGracePeriodSeconds = runner.Spec.TerminationGracePeriodSeconds
 	}

+	if len(runner.Spec.HostAliases) != 0 {
+		pod.Spec.HostAliases = runner.Spec.HostAliases
+	}
+
+	if runner.Spec.RuntimeClassName != nil {
+		pod.Spec.RuntimeClassName = runner.Spec.RuntimeClassName
+	}
+
 	if err := ctrl.SetControllerReference(&runner, &pod, r.Scheme); err != nil {
 		return pod, err
 	}
@@ -574,11 +933,17 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 }

 func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	r.Recorder = mgr.GetEventRecorderFor("runner-controller")
+	name := "runner-controller"
+	if r.Name != "" {
+		name = r.Name
+	}
+
+	r.Recorder = mgr.GetEventRecorderFor(name)

 	return ctrl.NewControllerManagedBy(mgr).
 		For(&v1alpha1.Runner{}).
 		Owns(&corev1.Pod{}).
+		Named(name).
 		Complete(r)
 }
@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"
 	"hash/fnv"
+	"reflect"
 	"sort"
 	"time"

@@ -37,10 +38,12 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
+	"github.com/summerwind/actions-runner-controller/controllers/metrics"
 )

 const (
 	LabelKeyRunnerTemplateHash = "runner-template-hash"
+	LabelKeyRunnerDeploymentName = "runner-deployment-name"

 	runnerSetOwnerKey = ".metadata.controller"
 )
@@ -51,6 +54,8 @@ type RunnerDeploymentReconciler struct {
 	Log      logr.Logger
 	Recorder record.EventRecorder
 	Scheme   *runtime.Scheme
+	CommonRunnerLabels []string
+	Name               string
 }

 // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnerdeployments,verbs=get;list;watch;create;update;patch;delete
@@ -73,6 +78,8 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
 		return ctrl.Result{}, nil
 	}

+	metrics.SetRunnerDeployment(rd)
+
 	var myRunnerReplicaSetList v1alpha1.RunnerReplicaSetList
 	if err := r.List(ctx, &myRunnerReplicaSetList, client.InNamespace(req.Namespace), client.MatchingFields{runnerSetOwnerKey: req.Name}); err != nil {
 		return ctrl.Result{}, err
@@ -141,6 +148,28 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
 		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
 	}

+	if !reflect.DeepEqual(newestSet.Spec.Selector, desiredRS.Spec.Selector) {
+		updateSet := newestSet.DeepCopy()
+		updateSet.Spec = *desiredRS.Spec.DeepCopy()
+
+		// A selector update change doesn't trigger replicaset replacement,
+		// but we still need to update the existing replicaset with it.
+		// Otherwise selector-based runner query will never work on replicasets created before the controller v0.17.0
+		// See https://github.com/summerwind/actions-runner-controller/pull/355#discussion_r585379259
+		if err := r.Client.Update(ctx, updateSet); err != nil {
+			log.Error(err, "Failed to update runnerreplicaset resource")
+
+			return ctrl.Result{}, err
+		}
+
+		// At this point, we are already sure that there's no need to create a new replicaset
+		// as the runner template hash is not changed.
+		//
+		// But we still need to requeue for the (possibly rare) cases that there are still old replicasets that need
+		// to be cleaned up.
+		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
+	}
+
 	const defaultReplicas = 1

 	currentDesiredReplicas := getIntOrDefault(newestSet.Spec.Replicas, defaultReplicas)
@@ -159,25 +188,42 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
 		return ctrl.Result{}, err
 	}

-	// Do we old runner replica sets that should eventually deleted?
+	// Do we have old runner replica sets that should eventually be deleted?
 	if len(oldSets) > 0 {
-		readyReplicas := newestSet.Status.ReadyReplicas
+		var readyReplicas int
+		if newestSet.Status.ReadyReplicas != nil {
+			readyReplicas = *newestSet.Status.ReadyReplicas
+		}

-		if readyReplicas < currentDesiredReplicas {
-			log.WithValues("runnerreplicaset", types.NamespacedName{
+		oldSetsCount := len(oldSets)
+
+		logWithDebugInfo := log.WithValues(
+			"newest_runnerreplicaset", types.NamespacedName{
 				Namespace: newestSet.Namespace,
 				Name:      newestSet.Name,
-			}).
+			},
+			"newest_runnerreplicaset_replicas_ready", readyReplicas,
+			"newest_runnerreplicaset_replicas_desired", currentDesiredReplicas,
+			"old_runnerreplicasets_count", oldSetsCount,
+		)
+
+		if readyReplicas < currentDesiredReplicas {
+			logWithDebugInfo.
 				Info("Waiting until the newest runnerreplicaset to be 100% available")

-			return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
+			return ctrl.Result{}, nil
+		}
+
+		if oldSetsCount > 0 {
+			logWithDebugInfo.
+				Info("The newest runnerreplicaset is 100% available. Deleting old runnerreplicasets")
 		}

 		for i := range oldSets {
 			rs := oldSets[i]

 			if err := r.Client.Delete(ctx, &rs); err != nil {
-				log.Error(err, "Failed to delete runner resource")
+				log.Error(err, "Failed to delete runnerreplicaset resource")

 				return ctrl.Result{}, err
 			}
@@ -188,14 +234,49 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
 		}
 	}

-	if rd.Spec.Replicas == nil && desiredRS.Spec.Replicas != nil {
+	var replicaSets []v1alpha1.RunnerReplicaSet
+
+	replicaSets = append(replicaSets, *newestSet)
+	replicaSets = append(replicaSets, oldSets...)
+
+	var totalCurrentReplicas, totalStatusAvailableReplicas, updatedReplicas int
+
+	for _, rs := range replicaSets {
+		var current, available int
+
+		if rs.Status.Replicas != nil {
+			current = *rs.Status.Replicas
+		}
+
+		if rs.Status.AvailableReplicas != nil {
+			available = *rs.Status.AvailableReplicas
+		}
+
+		totalCurrentReplicas += current
+		totalStatusAvailableReplicas += available
+	}
+
+	if newestSet.Status.Replicas != nil {
+		updatedReplicas = *newestSet.Status.Replicas
+	}
+
+	var status v1alpha1.RunnerDeploymentStatus
+
+	status.AvailableReplicas = &totalStatusAvailableReplicas
+	status.ReadyReplicas = &totalStatusAvailableReplicas
+	status.DesiredReplicas = &newDesiredReplicas
+	status.Replicas = &totalCurrentReplicas
+	status.UpdatedReplicas = &updatedReplicas
+
+	if !reflect.DeepEqual(rd.Status, status) {
 		updated := rd.DeepCopy()
-		updated.Status.Replicas = desiredRS.Spec.Replicas
+		updated.Status = status

-		if err := r.Status().Update(ctx, updated); err != nil {
-			log.Error(err, "Failed to update runnerdeployment status")
+		if err := r.Status().Patch(ctx, updated, client.MergeFrom(&rd)); err != nil {
+			log.Info("Failed to patch runnerdeployment status. Retrying immediately", "error", err.Error())
+			return ctrl.Result{
-			return ctrl.Result{}, err
+				Requeue: true,
+			}, nil
 		}
 	}
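The status aggregation above sums pointer-valued counters across the newest and old replica sets; a reduced, self-contained sketch of the same pattern (a local struct stands in for the v1alpha1 status types, so the names here are illustrative):

	package main

	import "fmt"

	// rsStatus stands in for a replica set status whose counters are *int
	// because they are optional in the API.
	type rsStatus struct {
		Replicas          *int
		AvailableReplicas *int
	}

	func intOrZero(p *int) int {
		if p != nil {
			return *p
		}
		return 0
	}

	func main() {
		two, one := 2, 1
		sets := []rsStatus{
			{Replicas: &two, AvailableReplicas: &two}, // newest
			{Replicas: &one, AvailableReplicas: nil},  // old, not fully reporting yet
		}

		var totalCurrent, totalAvailable int
		for _, s := range sets {
			totalCurrent += intOrZero(s.Replicas)
			totalAvailable += intOrZero(s.AvailableReplicas)
		}
		fmt.Println(totalCurrent, totalAvailable) // 3 2
	}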
@@ -256,28 +337,94 @@ func CloneAndAddLabel(labels map[string]string, labelKey, labelValue string) map
 	return newLabels
 }

-func (r *RunnerDeploymentReconciler) newRunnerReplicaSet(rd v1alpha1.RunnerDeployment) (*v1alpha1.RunnerReplicaSet, error) {
-	newRSTemplate := *rd.Spec.Template.DeepCopy()
-	templateHash := ComputeHash(&newRSTemplate)
-	// Add template hash label to selector.
-	labels := CloneAndAddLabel(rd.Spec.Template.Labels, LabelKeyRunnerTemplateHash, templateHash)
+// Clones the given selector and returns a new selector with the given key and value added.
+// Returns the given selector, if labelKey is empty.
+//
+// Proudly copied from k8s.io/kubernetes/pkg/util/labels.CloneSelectorAndAddLabel
+func CloneSelectorAndAddLabel(selector *metav1.LabelSelector, labelKey, labelValue string) *metav1.LabelSelector {
+	if labelKey == "" {
+		// Don't need to add a label.
+		return selector
+	}

-	newRSTemplate.Labels = labels
+	// Clone.
+	newSelector := new(metav1.LabelSelector)
+
+	newSelector.MatchLabels = make(map[string]string)
+	if selector.MatchLabels != nil {
+		for key, val := range selector.MatchLabels {
+			newSelector.MatchLabels[key] = val
+		}
+	}
+	newSelector.MatchLabels[labelKey] = labelValue
+
+	if selector.MatchExpressions != nil {
+		newMExps := make([]metav1.LabelSelectorRequirement, len(selector.MatchExpressions))
+		for i, me := range selector.MatchExpressions {
+			newMExps[i].Key = me.Key
+			newMExps[i].Operator = me.Operator
+			if me.Values != nil {
+				newMExps[i].Values = make([]string, len(me.Values))
+				copy(newMExps[i].Values, me.Values)
+			} else {
+				newMExps[i].Values = nil
+			}
+		}
+		newSelector.MatchExpressions = newMExps
+	} else {
+		newSelector.MatchExpressions = nil
+	}
+
+	return newSelector
+}
+
+func (r *RunnerDeploymentReconciler) newRunnerReplicaSet(rd v1alpha1.RunnerDeployment) (*v1alpha1.RunnerReplicaSet, error) {
+	return newRunnerReplicaSet(&rd, r.CommonRunnerLabels, r.Scheme)
+}
+
+func getSelector(rd *v1alpha1.RunnerDeployment) *metav1.LabelSelector {
+	selector := rd.Spec.Selector
+	if selector == nil {
+		selector = &metav1.LabelSelector{MatchLabels: map[string]string{LabelKeyRunnerDeploymentName: rd.Name}}
+	}
+
+	return selector
+}
+
+func newRunnerReplicaSet(rd *v1alpha1.RunnerDeployment, commonRunnerLabels []string, scheme *runtime.Scheme) (*v1alpha1.RunnerReplicaSet, error) {
+	newRSTemplate := *rd.Spec.Template.DeepCopy()
+
+	for _, l := range commonRunnerLabels {
+		newRSTemplate.Spec.Labels = append(newRSTemplate.Spec.Labels, l)
+	}
+
+	templateHash := ComputeHash(&newRSTemplate)
+
+	// Add template hash label to selector.
+	newRSTemplate.ObjectMeta.Labels = CloneAndAddLabel(newRSTemplate.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)
+
+	// This label selector is used by default when rd.Spec.Selector is empty.
+	newRSTemplate.ObjectMeta.Labels = CloneAndAddLabel(newRSTemplate.ObjectMeta.Labels, LabelKeyRunnerDeploymentName, rd.Name)
+
+	selector := getSelector(rd)
+
+	newRSSelector := CloneSelectorAndAddLabel(selector, LabelKeyRunnerTemplateHash, templateHash)

 	rs := v1alpha1.RunnerReplicaSet{
 		TypeMeta: metav1.TypeMeta{},
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: rd.ObjectMeta.Name + "-",
 			Namespace:    rd.ObjectMeta.Namespace,
-			Labels:       labels,
+			Labels:       newRSTemplate.ObjectMeta.Labels,
 		},
 		Spec: v1alpha1.RunnerReplicaSetSpec{
 			Replicas: rd.Spec.Replicas,
+			Selector: newRSSelector,
 			Template: newRSTemplate,
 		},
 	}

-	if err := ctrl.SetControllerReference(&rd, &rs, r.Scheme); err != nil {
+	if err := ctrl.SetControllerReference(rd, &rs, scheme); err != nil {
 		return &rs, err
 	}

@@ -285,7 +432,12 @@ func (r *RunnerDeploymentReconciler) newRunnerReplicaSet(rd v1alpha1.RunnerDeplo
 }

 func (r *RunnerDeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	r.Recorder = mgr.GetEventRecorderFor("runnerdeployment-controller")
+	name := "runnerdeployment-controller"
+	if r.Name != "" {
+		name = r.Name
+	}
+
+	r.Recorder = mgr.GetEventRecorderFor(name)

 	if err := mgr.GetFieldIndexer().IndexField(&v1alpha1.RunnerReplicaSet{}, runnerSetOwnerKey, func(rawObj runtime.Object) []string {
 		runnerSet := rawObj.(*v1alpha1.RunnerReplicaSet)
@@ -306,5 +458,6 @@ func (r *RunnerDeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&v1alpha1.RunnerDeployment{}).
 		Owns(&v1alpha1.RunnerReplicaSet{}).
+		Named(name).
 		Complete(r)
 }
@@ -2,8 +2,13 @@ package controllers

 import (
 	"context"
+	"fmt"
+	"testing"
 	"time"

+	"github.com/google/go-cmp/cmp"
+	"k8s.io/apimachinery/pkg/runtime"
+
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes/scheme"
@@ -18,6 +23,103 @@ import (
 	actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
 )

+func TestNewRunnerReplicaSet(t *testing.T) {
+	scheme := runtime.NewScheme()
+	if err := actionsv1alpha1.AddToScheme(scheme); err != nil {
+		t.Fatalf("%v", err)
+	}
+
+	r := &RunnerDeploymentReconciler{
+		CommonRunnerLabels: []string{"dev"},
+		Scheme:             scheme,
+	}
+	rd := actionsv1alpha1.RunnerDeployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "example",
+		},
+		Spec: actionsv1alpha1.RunnerDeploymentSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"foo": "bar",
+				},
+			},
+			Template: actionsv1alpha1.RunnerTemplate{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"foo": "bar",
+					},
+				},
+				Spec: actionsv1alpha1.RunnerSpec{
+					Labels: []string{"project1"},
+				},
+			},
+		},
+	}
+
+	rs, err := r.newRunnerReplicaSet(rd)
+	if err != nil {
+		t.Fatalf("%v", err)
+	}
+
+	if val, ok := rs.Labels["foo"]; ok {
+		if val != "bar" {
+			t.Errorf("foo label does not have bar but %v", val)
+		}
+	} else {
+		t.Errorf("foo label does not exist")
+	}
+
+	hash1, ok := rs.Labels[LabelKeyRunnerTemplateHash]
+	if !ok {
+		t.Errorf("missing runner-template-hash label")
+	}
+
+	runnerLabel := []string{"project1", "dev"}
+	if d := cmp.Diff(runnerLabel, rs.Spec.Template.Spec.Labels); d != "" {
+		t.Errorf("%s", d)
+	}
+
+	rd2 := rd.DeepCopy()
+	rd2.Spec.Template.Spec.Labels = []string{"project2"}
+
+	rs2, err := r.newRunnerReplicaSet(*rd2)
+	if err != nil {
+		t.Fatalf("%v", err)
+	}
+
+	hash2, ok := rs2.Labels[LabelKeyRunnerTemplateHash]
+	if !ok {
+		t.Errorf("missing runner-template-hash label")
+	}
+
+	if hash1 == hash2 {
+		t.Errorf(
+			"runner replica sets from runner deployments with varying labels must have different template hash, but got %s and %s",
+			hash1, hash2,
+		)
+	}
+
+	rd3 := rd.DeepCopy()
+	rd3.Spec.Template.Labels["foo"] = "baz"
+
+	rs3, err := r.newRunnerReplicaSet(*rd3)
+	if err != nil {
+		t.Fatalf("%v", err)
+	}
+
+	hash3, ok := rs3.Labels[LabelKeyRunnerTemplateHash]
+	if !ok {
+		t.Errorf("missing runner-template-hash label")
+	}
+
+	if hash1 == hash3 {
+		t.Errorf(
+			"runner replica sets from runner deployments with varying meta labels must have different template hash, but got %s and %s",
+			hash1, hash3,
+		)
+	}
+}
+
 // SetupDeploymentTest will set up a testing environment.
 // This includes:
 // * creating a Namespace to be used during the test
@@ -37,7 +139,9 @@ func SetupDeploymentTest(ctx context.Context) *corev1.Namespace {
|
|||||||
err := k8sClient.Create(ctx, ns)
|
err := k8sClient.Create(ctx, ns)
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")
|
Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")
|
||||||
|
|
||||||
mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
|
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
|
||||||
|
Namespace: ns.Name,
|
||||||
|
})
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to create manager")
|
Expect(err).NotTo(HaveOccurred(), "failed to create manager")
|
||||||
|
|
||||||
controller := &RunnerDeploymentReconciler{
|
controller := &RunnerDeploymentReconciler{
|
||||||
@@ -45,6 +149,7 @@ func SetupDeploymentTest(ctx context.Context) *corev1.Namespace {
|
|||||||
Scheme: scheme.Scheme,
|
Scheme: scheme.Scheme,
|
||||||
Log: logf.Log,
|
Log: logf.Log,
|
||||||
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
||||||
|
Name: "runnerdeployment-" + ns.Name,
|
||||||
}
|
}
|
||||||
err = controller.SetupWithManager(mgr)
|
err = controller.SetupWithManager(mgr)
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||||
@@ -74,7 +179,7 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
Describe("when no existing resources exist", func() {
|
Describe("when no existing resources exist", func() {
|
||||||
|
|
||||||
It("should create a new RunnerReplicaSet resource from the specified template, add a another RunnerReplicaSet on template modification, and eventually removes old runnerreplicasets", func() {
|
It("should create a new RunnerReplicaSet resource from the specified template, add a another RunnerReplicaSet on template modification, and eventually removes old runnerreplicasets", func() {
|
||||||
name := "example-runnerdeploy"
|
name := "example-runnerdeploy-1"
|
||||||
|
|
||||||
{
|
{
|
||||||
rs := &actionsv1alpha1.RunnerDeployment{
|
rs := &actionsv1alpha1.RunnerDeployment{
|
||||||
@@ -84,9 +189,19 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
},
|
},
|
||||||
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||||
Replicas: intPtr(1),
|
Replicas: intPtr(1),
|
||||||
|
Selector: &metav1.LabelSelector{
|
||||||
|
MatchLabels: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
Template: actionsv1alpha1.RunnerTemplate{
|
Template: actionsv1alpha1.RunnerTemplate{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Labels: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
Spec: actionsv1alpha1.RunnerSpec{
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
Repository: "foo/bar",
|
Repository: "test/valid",
|
||||||
Image: "bar",
|
Image: "bar",
|
||||||
Env: []corev1.EnvVar{
|
Env: []corev1.EnvVar{
|
||||||
{Name: "FOO", Value: "FOOVALUE"},
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
@@ -103,29 +218,25 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||||
|
|
||||||
Eventually(
|
Eventually(
|
||||||
func() int {
|
func() (int, error) {
|
||||||
err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
|
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logf.Log.Error(err, "list runner sets")
|
return 0, err
|
||||||
}
|
}
|
||||||
|
err = k8sClient.List(
|
||||||
return len(runnerSets.Items)
|
ctx,
|
||||||
},
|
&runnerSets,
|
||||||
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
|
client.InNamespace(ns.Name),
|
||||||
|
client.MatchingLabelsSelector{Selector: selector},
|
||||||
Eventually(
|
)
|
||||||
func() int {
|
|
||||||
err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logf.Log.Error(err, "list runner sets")
|
return 0, err
|
||||||
|
}
|
||||||
|
if len(runnerSets.Items) != 1 {
|
||||||
|
return 0, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(runnerSets.Items) == 0 {
|
return *runnerSets.Items[0].Spec.Replicas, nil
|
||||||
logf.Log.Info("No runnerreplicasets exist yet")
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
return *runnerSets.Items[0].Spec.Replicas
|
|
||||||
},
|
},
|
||||||
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
|
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
|
||||||
}
|
}
|
||||||
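The hunks above change the polled closures from func() int to func() (int, error) and filter the List call by the deployment's label selector. Gomega treats extra return values from a polled function as errors: a non-nil error fails that attempt and Eventually keeps retrying until the timeout, which is exactly what the rewritten closures rely on. A minimal, self-contained sketch of that pattern, using a hypothetical countWidgets helper rather than this repository's types:

package example_test

import (
	"fmt"
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

// countWidgets stands in for the "list and count" closures above. Returning a
// non-nil error marks the current poll attempt as failed; Eventually retries
// until the value matches or the timeout elapses.
func countWidgets(ready bool) (int, error) {
	if !ready {
		return 0, fmt.Errorf("widgets not listable yet")
	}
	return 1, nil
}

func TestEventuallyWithError(t *testing.T) {
	g := NewWithT(t)

	g.Eventually(
		func() (int, error) { return countWidgets(true) },
		time.Second*5, time.Millisecond*500,
	).Should(BeEquivalentTo(1))
}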
@@ -134,13 +245,12 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
// We wrap the update in the Eventually block to avoid the below error that occurs due to concurrent modification
|
// We wrap the update in the Eventually block to avoid the below error that occurs due to concurrent modification
|
||||||
// made by the controller to update .Status.AvailableReplicas and .Status.ReadyReplicas
|
// made by the controller to update .Status.AvailableReplicas and .Status.ReadyReplicas
|
||||||
// Operation cannot be fulfilled on runnersets.actions.summerwind.dev "example-runnerset": the object has been modified; please apply your changes to the latest version and try again
|
// Operation cannot be fulfilled on runnersets.actions.summerwind.dev "example-runnerset": the object has been modified; please apply your changes to the latest version and try again
|
||||||
Eventually(func() error {
|
|
||||||
var rd actionsv1alpha1.RunnerDeployment
|
var rd actionsv1alpha1.RunnerDeployment
|
||||||
|
Eventually(func() error {
|
||||||
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: name}, &rd)
|
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: name}, &rd)
|
||||||
|
if err != nil {
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to get test RunnerReplicaSet resource")
|
return fmt.Errorf("failed to get test RunnerReplicaSet resource: %v\n", err)
|
||||||
|
}
|
||||||
rd.Spec.Replicas = intPtr(2)
|
rd.Spec.Replicas = intPtr(2)
|
||||||
|
|
||||||
return k8sClient.Update(ctx, &rd)
|
return k8sClient.Update(ctx, &rd)
|
||||||
@@ -150,27 +260,222 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||||
|
|
||||||
Eventually(
|
Eventually(
|
||||||
func() int {
|
func() (int, error) {
|
||||||
err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
|
selector, err := metav1.LabelSelectorAsSelector(rd.Spec.Selector)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logf.Log.Error(err, "list runner sets")
|
return 0, err
|
||||||
|
}
|
||||||
|
err = k8sClient.List(
|
||||||
|
ctx,
|
||||||
|
&runnerSets,
|
||||||
|
client.InNamespace(ns.Name),
|
||||||
|
client.MatchingLabelsSelector{Selector: selector},
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
if len(runnerSets.Items) != 1 {
|
||||||
|
return 0, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||||
}
|
}
|
||||||
|
|
||||||
return len(runnerSets.Items)
|
return *runnerSets.Items[0].Spec.Replicas, nil
|
||||||
},
|
|
||||||
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
|
|
||||||
|
|
||||||
Eventually(
|
|
||||||
func() int {
|
|
||||||
err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
|
|
||||||
if err != nil {
|
|
||||||
logf.Log.Error(err, "list runner sets")
|
|
||||||
}
|
|
||||||
|
|
||||||
return *runnerSets.Items[0].Spec.Replicas
|
|
||||||
},
|
},
|
||||||
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(2))
|
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(2))
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
It("should create a new RunnerReplicaSet resource from the specified template without labels and selector, add a another RunnerReplicaSet on template modification, and eventually removes old runnerreplicasets", func() {
|
||||||
|
name := "example-runnerdeploy-2"
|
||||||
|
|
||||||
|
{
|
||||||
|
rs := &actionsv1alpha1.RunnerDeployment{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: name,
|
||||||
|
Namespace: ns.Name,
|
||||||
|
},
|
||||||
|
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||||
|
Replicas: intPtr(1),
|
||||||
|
Template: actionsv1alpha1.RunnerTemplate{
|
||||||
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
|
Repository: "test/valid",
|
||||||
|
Image: "bar",
|
||||||
|
Env: []corev1.EnvVar{
|
||||||
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
err := k8sClient.Create(ctx, rs)
|
||||||
|
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to create test RunnerDeployment resource")
|
||||||
|
|
||||||
|
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() (int, error) {
|
||||||
|
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
err = k8sClient.List(
|
||||||
|
ctx,
|
||||||
|
&runnerSets,
|
||||||
|
client.InNamespace(ns.Name),
|
||||||
|
client.MatchingLabelsSelector{Selector: selector},
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
if len(runnerSets.Items) != 1 {
|
||||||
|
return 0, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||||
|
}
|
||||||
|
|
||||||
|
return *runnerSets.Items[0].Spec.Replicas, nil
|
||||||
|
},
|
||||||
|
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// We wrap the update in the Eventually block to avoid the below error that occurs due to concurrent modification
|
||||||
|
// made by the controller to update .Status.AvailableReplicas and .Status.ReadyReplicas
|
||||||
|
// Operation cannot be fulfilled on runnersets.actions.summerwind.dev "example-runnerset": the object has been modified; please apply your changes to the latest version and try again
|
||||||
|
var rd actionsv1alpha1.RunnerDeployment
|
||||||
|
Eventually(func() error {
|
||||||
|
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: name}, &rd)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get test RunnerDeployment resource: %v", err)
|
||||||
|
}
|
||||||
|
rd.Spec.Replicas = intPtr(2)
|
||||||
|
|
||||||
|
return k8sClient.Update(ctx, &rd)
|
||||||
|
},
|
||||||
|
time.Second*1, time.Millisecond*500).Should(BeNil())
|
||||||
|
|
||||||
|
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() (int, error) {
|
||||||
|
selector, err := metav1.LabelSelectorAsSelector(rd.Spec.Selector)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
err = k8sClient.List(
|
||||||
|
ctx,
|
||||||
|
&runnerSets,
|
||||||
|
client.InNamespace(ns.Name),
|
||||||
|
client.MatchingLabelsSelector{Selector: selector},
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
if len(runnerSets.Items) != 1 {
|
||||||
|
return 0, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||||
|
}
|
||||||
|
|
||||||
|
return *runnerSets.Items[0].Spec.Replicas, nil
|
||||||
|
},
|
||||||
|
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(2))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
It("should adopt RunnerReplicaSet created before 0.18.0 to have Spec.Selector", func() {
|
||||||
|
name := "example-runnerdeploy-2"
|
||||||
|
|
||||||
|
{
|
||||||
|
rd := &actionsv1alpha1.RunnerDeployment{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: name,
|
||||||
|
Namespace: ns.Name,
|
||||||
|
},
|
||||||
|
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||||
|
Replicas: intPtr(1),
|
||||||
|
Template: actionsv1alpha1.RunnerTemplate{
|
||||||
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
|
Repository: "test/valid",
|
||||||
|
Image: "bar",
|
||||||
|
Env: []corev1.EnvVar{
|
||||||
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
createRDErr := k8sClient.Create(ctx, rd)
|
||||||
|
Expect(createRDErr).NotTo(HaveOccurred(), "failed to create test RunnerDeployment resource")
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() (int, error) {
|
||||||
|
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||||
|
|
||||||
|
err := k8sClient.List(
|
||||||
|
ctx,
|
||||||
|
&runnerSets,
|
||||||
|
client.InNamespace(ns.Name),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return len(runnerSets.Items), nil
|
||||||
|
},
|
||||||
|
time.Second*1, time.Millisecond*500).Should(BeEquivalentTo(1))
|
||||||
|
|
||||||
|
var rs17 *actionsv1alpha1.RunnerReplicaSet
|
||||||
|
|
||||||
|
Consistently(
|
||||||
|
func() (*metav1.LabelSelector, error) {
|
||||||
|
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||||
|
|
||||||
|
err := k8sClient.List(
|
||||||
|
ctx,
|
||||||
|
&runnerSets,
|
||||||
|
client.InNamespace(ns.Name),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(runnerSets.Items) != 1 {
|
||||||
|
return nil, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||||
|
}
|
||||||
|
|
||||||
|
rs17 = &runnerSets.Items[0]
|
||||||
|
|
||||||
|
return runnerSets.Items[0].Spec.Selector, nil
|
||||||
|
},
|
||||||
|
time.Second*1, time.Millisecond*500).Should(Not(BeNil()))
|
||||||
|
|
||||||
|
// We simulate the old, pre 0.18.0 RunnerReplicaSet by updating it.
|
||||||
|
// I've tried to use controllerutil.Set{Owner,Controller}Reference and k8sClient.Create(rs17)
|
||||||
|
// but it didn't work due to the missing RD UID, which is generated by the K8s API server on k8sClient.Create(rd)
|
||||||
|
rs17.Spec.Selector = nil
|
||||||
|
|
||||||
|
updateRSErr := k8sClient.Update(ctx, rs17)
|
||||||
|
Expect(updateRSErr).NotTo(HaveOccurred())
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() (*metav1.LabelSelector, error) {
|
||||||
|
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||||
|
|
||||||
|
err := k8sClient.List(
|
||||||
|
ctx,
|
||||||
|
&runnerSets,
|
||||||
|
client.InNamespace(ns.Name),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(runnerSets.Items) != 1 {
|
||||||
|
return nil, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||||
|
}
|
||||||
|
|
||||||
|
return runnerSets.Items[0].Spec.Selector, nil
|
||||||
|
},
|
||||||
|
time.Second*1, time.Millisecond*500).Should(Not(BeNil()))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
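Both test files in this change set now convert the resource's Spec.Selector with metav1.LabelSelectorAsSelector and pass the result to List via client.MatchingLabelsSelector, so only children matching the deployment's selector are counted. A minimal sketch of that pattern against a generic ConfigMapList (the helper name listBySelector is illustrative, not from this repository):

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listBySelector converts a spec-level *metav1.LabelSelector into a
// labels.Selector and lists only the matching objects in one namespace.
// Note: a nil selector converts to labels.Nothing() (matches no objects),
// while an empty &metav1.LabelSelector{} converts to labels.Everything().
func listBySelector(ctx context.Context, c client.Client, namespace string, ls *metav1.LabelSelector) (*corev1.ConfigMapList, error) {
	selector, err := metav1.LabelSelectorAsSelector(ls)
	if err != nil {
		return nil, err
	}

	var list corev1.ConfigMapList
	if err := c.List(
		ctx,
		&list,
		client.InNamespace(namespace),
		client.MatchingLabelsSelector{Selector: selector},
	); err != nil {
		return nil, err
	}

	return &list, nil
}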
|||||||
@@ -18,10 +18,15 @@ package controllers
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
gogithub "github.com/google/go-github/v33/github"
|
||||||
|
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/client-go/tools/record"
|
"k8s.io/client-go/tools/record"
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
@@ -41,6 +46,7 @@ type RunnerReplicaSetReconciler struct {
|
|||||||
Recorder record.EventRecorder
|
Recorder record.EventRecorder
|
||||||
Scheme *runtime.Scheme
|
Scheme *runtime.Scheme
|
||||||
GitHubClient *github.Client
|
GitHubClient *github.Client
|
||||||
|
Name string
|
||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnerreplicasets,verbs=get;list;watch;create;update;patch;delete
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnerreplicasets,verbs=get;list;watch;create;update;patch;delete
|
||||||
@@ -63,25 +69,43 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
|
|||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
|
||||||
|
if err != nil {
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
// Get the Runners managed by the target RunnerReplicaSet
|
||||||
var allRunners v1alpha1.RunnerList
|
var allRunners v1alpha1.RunnerList
|
||||||
if err := r.List(ctx, &allRunners, client.InNamespace(req.Namespace)); err != nil {
|
if err := r.List(
|
||||||
if !errors.IsNotFound(err) {
|
ctx,
|
||||||
|
&allRunners,
|
||||||
|
client.InNamespace(req.Namespace),
|
||||||
|
client.MatchingLabelsSelector{Selector: selector},
|
||||||
|
); err != nil {
|
||||||
|
if !kerrors.IsNotFound(err) {
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var myRunners []v1alpha1.Runner
|
var myRunners []v1alpha1.Runner
|
||||||
|
|
||||||
var available, ready int
|
var (
|
||||||
|
current int
|
||||||
|
ready int
|
||||||
|
available int
|
||||||
|
)
|
||||||
|
|
||||||
for _, r := range allRunners.Items {
|
for _, r := range allRunners.Items {
|
||||||
if metav1.IsControlledBy(&r, &rs) {
|
// This guard is required so that a RunnerReplicaSet created by controller v0.17.0 or earlier
|
||||||
|
// does not treat all the runners in the namespace as its children.
|
||||||
|
if metav1.IsControlledBy(&r, &rs) && !metav1.HasAnnotation(r.ObjectMeta, annotationKeyRegistrationOnly) {
|
||||||
myRunners = append(myRunners, r)
|
myRunners = append(myRunners, r)
|
||||||
|
|
||||||
available += 1
|
current += 1
|
||||||
|
|
||||||
if r.Status.Phase == string(corev1.PodRunning) {
|
if r.Status.Phase == string(corev1.PodRunning) {
|
||||||
ready += 1
|
ready += 1
|
||||||
|
// available is currently the same as ready, as we don't yet have minReadySeconds for runners
|
||||||
|
available += 1
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -94,40 +118,158 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
|
|||||||
desired = 1
|
desired = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
log.V(0).Info("debug", "desired", desired, "available", available)
|
registrationOnlyRunnerNsName := req.NamespacedName
|
||||||
|
registrationOnlyRunnerNsName.Name = registrationOnlyRunnerNameFor(rs.Name)
|
||||||
if available > desired {
|
registrationOnlyRunner := v1alpha1.Runner{}
|
||||||
n := available - desired
|
registrationOnlyRunnerExists := false
|
||||||
|
if err := r.Get(
|
||||||
// get runners that are currently not busy
|
ctx,
|
||||||
var notBusy []v1alpha1.Runner
|
registrationOnlyRunnerNsName,
|
||||||
for _, runner := range myRunners {
|
®istrationOnlyRunner,
|
||||||
busy, err := r.isRunnerBusy(ctx, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
|
); err != nil {
|
||||||
if err != nil {
|
if !kerrors.IsNotFound(err) {
|
||||||
log.Error(err, "Failed to check if runner is busy")
|
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
if !busy {
|
} else {
|
||||||
notBusy = append(notBusy, runner)
|
registrationOnlyRunnerExists = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// On scale to zero, we must have a fully registered registration-only runner before we start deleting other runners, hence `desired == 0`
|
||||||
|
// On scale from zero, we must retain the registration-only runner until one or more other runners get registered, hence `registrationOnlyRunnerExists && current == 0`.
|
||||||
|
// On RunnerReplicaSet creation, it always has 0 replicas and no registration-only runner.
|
||||||
|
// In this case we don't need to bother creating a registration-only runner, which would get deleted soon after we have 1 or more available replicas,
|
||||||
|
// hence it's not `current == 0`, but `registrationOnlyRunnerExists && current == 0`.
|
||||||
|
// See https://github.com/actions-runner-controller/actions-runner-controller/issues/516
|
||||||
|
registrationOnlyRunnerNeeded := desired == 0 || (registrationOnlyRunnerExists && current == 0)
|
||||||
|
|
||||||
|
if registrationOnlyRunnerNeeded {
|
||||||
|
if registrationOnlyRunnerExists {
|
||||||
|
if registrationOnlyRunner.Status.Phase == "" {
|
||||||
|
log.Info("Still waiting for the registration-only runner to be registered")
|
||||||
|
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// A registration-only runner does not exist and is needed, hence create it.
|
||||||
|
|
||||||
|
runnerForScaleFromToZero, err := r.newRunner(rs)
|
||||||
|
if err != nil {
|
||||||
|
return ctrl.Result{}, fmt.Errorf("failed to create runner for scale from/to zero: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
runnerForScaleFromToZero.ObjectMeta.Name = registrationOnlyRunnerNsName.Name
|
||||||
|
runnerForScaleFromToZero.ObjectMeta.GenerateName = ""
|
||||||
|
runnerForScaleFromToZero.ObjectMeta.Labels = nil
|
||||||
|
metav1.SetMetaDataAnnotation(&runnerForScaleFromToZero.ObjectMeta, annotationKeyRegistrationOnly, "true")
|
||||||
|
|
||||||
|
if err := r.Client.Create(ctx, &runnerForScaleFromToZero); err != nil {
|
||||||
|
log.Error(err, "Failed to create runner for scale from/to zero")
|
||||||
|
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// We can continue to deleting runner pods only after the
|
||||||
|
// registration-only runner gets registered.
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// A registration-only runner exists and is not needed, hence delete it.
|
||||||
|
if registrationOnlyRunnerExists {
|
||||||
|
if err := r.Client.Delete(ctx, &registrationOnlyRunner); err != nil {
|
||||||
|
log.Error(err, "Retrying soon because we failed to delete registration-only runner")
|
||||||
|
|
||||||
|
return ctrl.Result{Requeue: true}, nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(notBusy) < n {
|
if current > desired {
|
||||||
n = len(notBusy)
|
n := current - desired
|
||||||
|
|
||||||
|
log.V(0).Info(fmt.Sprintf("Deleting %d runners", n), "desired", desired, "current", current, "ready", ready)
|
||||||
|
|
||||||
|
// get runners that are currently offline/not busy/timed-out to register
|
||||||
|
var deletionCandidates []v1alpha1.Runner
|
||||||
|
|
||||||
|
for _, runner := range allRunners.Items {
|
||||||
|
busy, err := r.GitHubClient.IsRunnerBusy(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
|
||||||
|
if err != nil {
|
||||||
|
notRegistered := false
|
||||||
|
offline := false
|
||||||
|
|
||||||
|
var notFoundException *github.RunnerNotFound
|
||||||
|
var offlineException *github.RunnerOffline
|
||||||
|
if errors.As(err, &notFoundException) {
|
||||||
|
log.V(1).Info("Failed to check if runner is busy. Either this runner has never been successfully registered to GitHub or it still needs more time.", "runnerName", runner.Name)
|
||||||
|
notRegistered = true
|
||||||
|
} else if errors.As(err, &offlineException) {
|
||||||
|
offline = true
|
||||||
|
} else {
|
||||||
|
var e *gogithub.RateLimitError
|
||||||
|
if errors.As(err, &e) {
|
||||||
|
// We log the underlying error when we fail to call the GitHub API to list or unregister runners,
|
||||||
|
// or when the runner is still busy.
|
||||||
|
log.Error(
|
||||||
|
err,
|
||||||
|
fmt.Sprintf(
|
||||||
|
"Failed to check if runner is busy due to GitHub API rate limit. Retrying in %s to avoid excessive GitHub API calls",
|
||||||
|
retryDelayOnGitHubAPIRateLimitError,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
return ctrl.Result{RequeueAfter: retryDelayOnGitHubAPIRateLimitError}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
registrationTimeout := 15 * time.Minute
|
||||||
|
currentTime := time.Now()
|
||||||
|
registrationDidTimeout := currentTime.Sub(runner.CreationTimestamp.Add(registrationTimeout)) > 0
|
||||||
|
|
||||||
|
if notRegistered && registrationDidTimeout {
|
||||||
|
log.Info(
|
||||||
|
"Runner failed to register itself to GitHub in timely manner. "+
|
||||||
|
"Marking the runner for scale down. "+
|
||||||
|
"CAUTION: If you see this a lot, you should investigate the root cause. "+
|
||||||
|
"See https://github.com/summerwind/actions-runner-controller/issues/288",
|
||||||
|
"runnerCreationTimestamp", runner.CreationTimestamp,
|
||||||
|
"currentTime", currentTime,
|
||||||
|
"configuredRegistrationTimeout", registrationTimeout,
|
||||||
|
)
|
||||||
|
|
||||||
|
deletionCandidates = append(deletionCandidates, runner)
|
||||||
|
}
|
||||||
|
|
||||||
|
// offline runners should always be a great target for scale down
|
||||||
|
if offline {
|
||||||
|
deletionCandidates = append(deletionCandidates, runner)
|
||||||
|
}
|
||||||
|
} else if !busy {
|
||||||
|
deletionCandidates = append(deletionCandidates, runner)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(deletionCandidates) < n {
|
||||||
|
n = len(deletionCandidates)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.V(0).Info(fmt.Sprintf("Deleting %d runner(s)", n), "desired", desired, "current", current, "ready", ready)
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
for i := 0; i < n; i++ {
|
||||||
if err := r.Client.Delete(ctx, &notBusy[i]); err != nil {
|
if err := r.Client.Delete(ctx, &deletionCandidates[i]); client.IgnoreNotFound(err) != nil {
|
||||||
log.Error(err, "Failed to delete runner resource")
|
log.Error(err, "Failed to delete runner resource")
|
||||||
|
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
r.Recorder.Event(&rs, corev1.EventTypeNormal, "RunnerDeleted", fmt.Sprintf("Deleted runner '%s'", myRunners[i].Name))
|
r.Recorder.Event(&rs, corev1.EventTypeNormal, "RunnerDeleted", fmt.Sprintf("Deleted runner '%s'", deletionCandidates[i].Name))
|
||||||
log.Info("Deleted runner", "runnerreplicaset", rs.ObjectMeta.Name)
|
log.Info("Deleted runner")
|
||||||
}
|
}
|
||||||
} else if desired > available {
|
} else if desired > current {
|
||||||
n := desired - available
|
n := desired - current
|
||||||
|
|
||||||
|
log.V(0).Info(fmt.Sprintf("Creating %d runner(s)", n), "desired", desired, "available", current, "ready", ready)
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
for i := 0; i < n; i++ {
|
||||||
newRunner, err := r.newRunner(rs)
|
newRunner, err := r.newRunner(rs)
|
||||||
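Earlier in this hunk, the comments around registrationOnlyRunnerNeeded condense the scale-to/from-zero rule into desired == 0 || (registrationOnlyRunnerExists && current == 0). Restated as a standalone function for clarity (the helper name is hypothetical; the reconciler inlines the expression):

package example

// needsRegistrationOnlyRunner restates the reconciler's rule:
//   - scaling to zero (desired == 0): a registration-only runner must be fully
//     registered before the remaining runners are deleted;
//   - scaling from zero: an existing registration-only runner is retained until
//     at least one ordinary runner is present (current > 0);
//   - initial creation (no registration-only runner yet): none is created,
//     since it would be deleted as soon as ordinary replicas become available.
func needsRegistrationOnlyRunner(desired, current int, registrationOnlyRunnerExists bool) bool {
	return desired == 0 || (registrationOnlyRunnerExists && current == 0)
}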
@@ -145,14 +287,21 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if rs.Status.AvailableReplicas != available || rs.Status.ReadyReplicas != ready {
|
var status v1alpha1.RunnerReplicaSetStatus
|
||||||
updated := rs.DeepCopy()
|
|
||||||
updated.Status.AvailableReplicas = available
|
|
||||||
updated.Status.ReadyReplicas = ready
|
|
||||||
|
|
||||||
if err := r.Status().Update(ctx, updated); err != nil {
|
status.Replicas = ¤t
|
||||||
log.Error(err, "Failed to update runner status")
|
status.AvailableReplicas = &available
|
||||||
return ctrl.Result{}, err
|
status.ReadyReplicas = &ready
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(rs.Status, status) {
|
||||||
|
updated := rs.DeepCopy()
|
||||||
|
updated.Status = status
|
||||||
|
|
||||||
|
if err := r.Status().Patch(ctx, updated, client.MergeFrom(&rs)); err != nil {
|
||||||
|
log.Info("Failed to update runnerreplicaset status. Retrying immediately", "error", err.Error())
|
||||||
|
return ctrl.Result{
|
||||||
|
Requeue: true,
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
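The deletion-candidate selection earlier in the Reconcile hunk relies on errors.As against typed errors (RunnerNotFound, RunnerOffline, go-github's RateLimitError), which is why the import hunk renames k8s.io/apimachinery/pkg/api/errors to kerrors: the standard library errors package takes its usual name. A small self-contained sketch of that classification style; RunnerOffline here is a stand-in type, not the one from this repository's github package:

package example

import (
	"errors"
	"fmt"

	kerrors "k8s.io/apimachinery/pkg/api/errors"
)

// RunnerOffline mimics a typed error: errors.As lets callers branch on the
// kind of failure without matching on error strings.
type RunnerOffline struct{ Name string }

func (e *RunnerOffline) Error() string {
	return fmt.Sprintf("runner %q is offline", e.Name)
}

// classify maps an error to a coarse category, mirroring the reconciler's
// offline / not-found / other branches.
func classify(err error) string {
	var offline *RunnerOffline

	switch {
	case err == nil:
		return "ok"
	case errors.As(err, &offline):
		return "offline: " + offline.Name
	case kerrors.IsNotFound(err):
		return "kubernetes object not found"
	default:
		return "other"
	}
}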
@@ -179,26 +328,20 @@ func (r *RunnerReplicaSetReconciler) newRunner(rs v1alpha1.RunnerReplicaSet) (v1
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *RunnerReplicaSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
func (r *RunnerReplicaSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||||
r.Recorder = mgr.GetEventRecorderFor("runnerreplicaset-controller")
|
name := "runnerreplicaset-controller"
|
||||||
|
if r.Name != "" {
|
||||||
|
name = r.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Recorder = mgr.GetEventRecorderFor(name)
|
||||||
|
|
||||||
return ctrl.NewControllerManagedBy(mgr).
|
return ctrl.NewControllerManagedBy(mgr).
|
||||||
For(&v1alpha1.RunnerReplicaSet{}).
|
For(&v1alpha1.RunnerReplicaSet{}).
|
||||||
Owns(&v1alpha1.Runner{}).
|
Owns(&v1alpha1.Runner{}).
|
||||||
|
Named(name).
|
||||||
Complete(r)
|
Complete(r)
|
||||||
}
|
}
|
||||||
|
|
||||||
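SetupWithManager above now derives both the event recorder and the controller name from r.Name when it is set, and the test suites set it to a per-namespace value such as "runnerreplicaset-" + ns.Name. A plausible motivation, stated here as an assumption rather than something the diff spells out, is that each Ginkgo test starts its own manager in the same process, and controller-runtime registers per-controller loggers and metrics by name, so reusing one fixed name across managers collides. A sketch of the same wiring for a hypothetical reconciler (the Reconcile signature matches the controller-runtime version used in this change set):

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
	ctrl "sigs.k8s.io/controller-runtime"
)

// ExampleReconciler demonstrates the overridable controller-name pattern.
type ExampleReconciler struct {
	Recorder record.EventRecorder
	// Name, when non-empty, overrides the default controller name so that
	// several managers can host this reconciler in one process.
	Name string
}

func (r *ExampleReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	// No-op: this sketch is about SetupWithManager, not reconciliation.
	return ctrl.Result{}, nil
}

func (r *ExampleReconciler) SetupWithManager(mgr ctrl.Manager) error {
	name := "example-controller"
	if r.Name != "" {
		name = r.Name
	}

	r.Recorder = mgr.GetEventRecorderFor(name)

	return ctrl.NewControllerManagedBy(mgr).
		For(&corev1.ConfigMap{}).
		Named(name).
		Complete(r)
}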
func (r *RunnerReplicaSetReconciler) isRunnerBusy(ctx context.Context, org, repo, name string) (bool, error) {
|
func registrationOnlyRunnerNameFor(rsName string) string {
|
||||||
runners, err := r.GitHubClient.ListRunners(ctx, org, repo)
|
return rsName + "-registration-only"
|
||||||
r.Log.Info("runners", "github", runners)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, runner := range runners {
|
|
||||||
if runner.GetName() == name {
|
|
||||||
return runner.GetBusy(), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false, fmt.Errorf("runner not found")
|
|
||||||
}
|
}
|
||||||
|
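Earlier in the Reconcile hunk, the registration-only runner is derived from the ordinary runner template: the generated name and labels are cleared, a fixed name from registrationOnlyRunnerNameFor is set, and a marker annotation is added, which the replica-counting loop explicitly skips. A sketch of that transformation applied to a plain Pod as a stand-in for the Runner type; the annotation key value shown here is illustrative, the real constant is defined elsewhere in the repository:

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// annotationKeyRegistrationOnly is an assumed value; the controller defines
// its own constant for this key.
const annotationKeyRegistrationOnly = "example.dev/registration-only"

func registrationOnlyNameFor(rsName string) string {
	return rsName + "-registration-only"
}

// markRegistrationOnly turns a templated object into a registration-only one:
// a deterministic name, no GenerateName, no labels, plus a marker annotation
// so that it is never counted as an ordinary replica.
func markRegistrationOnly(pod *corev1.Pod, rsName string) {
	pod.ObjectMeta.Name = registrationOnlyNameFor(rsName)
	pod.ObjectMeta.GenerateName = ""
	pod.ObjectMeta.Labels = nil
	metav1.SetMetaDataAnnotation(&pod.ObjectMeta, annotationKeyRegistrationOnly, "true")
}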
|||||||
@@ -2,15 +2,14 @@ package controllers
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/google/go-github/v33/github"
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
"k8s.io/client-go/kubernetes/scheme"
|
"k8s.io/client-go/kubernetes/scheme"
|
||||||
"k8s.io/utils/pointer"
|
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||||
|
|
||||||
@@ -47,7 +46,9 @@ func SetupTest(ctx context.Context) *corev1.Namespace {
|
|||||||
err := k8sClient.Create(ctx, ns)
|
err := k8sClient.Create(ctx, ns)
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")
|
Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")
|
||||||
|
|
||||||
mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
|
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
|
||||||
|
Namespace: ns.Name,
|
||||||
|
})
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to create manager")
|
Expect(err).NotTo(HaveOccurred(), "failed to create manager")
|
||||||
|
|
||||||
runnersList = fake.NewRunnersList()
|
runnersList = fake.NewRunnersList()
|
||||||
@@ -60,6 +61,7 @@ func SetupTest(ctx context.Context) *corev1.Namespace {
|
|||||||
Log: logf.Log,
|
Log: logf.Log,
|
||||||
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
||||||
GitHubClient: ghClient,
|
GitHubClient: ghClient,
|
||||||
|
Name: "runnerreplicaset-" + ns.Name,
|
||||||
}
|
}
|
||||||
err = controller.SetupWithManager(mgr)
|
err = controller.SetupWithManager(mgr)
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||||
@@ -114,9 +116,19 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
},
|
},
|
||||||
Spec: actionsv1alpha1.RunnerReplicaSetSpec{
|
Spec: actionsv1alpha1.RunnerReplicaSetSpec{
|
||||||
Replicas: intPtr(1),
|
Replicas: intPtr(1),
|
||||||
|
Selector: &metav1.LabelSelector{
|
||||||
|
MatchLabels: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
Template: actionsv1alpha1.RunnerTemplate{
|
Template: actionsv1alpha1.RunnerTemplate{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Labels: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
Spec: actionsv1alpha1.RunnerSpec{
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
Repository: "foo/bar",
|
Repository: "test/valid",
|
||||||
Image: "bar",
|
Image: "bar",
|
||||||
Env: []corev1.EnvVar{
|
Env: []corev1.EnvVar{
|
||||||
{Name: "FOO", Value: "FOOVALUE"},
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
@@ -134,20 +146,29 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
|
|
||||||
Eventually(
|
Eventually(
|
||||||
func() int {
|
func() int {
|
||||||
err := k8sClient.List(ctx, &runners, client.InNamespace(ns.Name))
|
selector, err := metav1.LabelSelectorAsSelector(
|
||||||
|
&metav1.LabelSelector{
|
||||||
|
MatchLabels: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
logf.Log.Error(err, "failed to create labelselector")
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
err = k8sClient.List(
|
||||||
|
ctx,
|
||||||
|
&runners,
|
||||||
|
client.InNamespace(ns.Name),
|
||||||
|
client.MatchingLabelsSelector{Selector: selector},
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logf.Log.Error(err, "list runners")
|
logf.Log.Error(err, "list runners")
|
||||||
|
return -1
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, runner := range runners.Items {
|
runnersList.Sync(runners.Items)
|
||||||
runnersList.Add(&github.Runner{
|
|
||||||
ID: pointer.Int64Ptr(int64(i) + 1),
|
|
||||||
Name: pointer.StringPtr(runner.Name),
|
|
||||||
OS: pointer.StringPtr("linux"),
|
|
||||||
Status: pointer.StringPtr("online"),
|
|
||||||
Busy: pointer.BoolPtr(false),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return len(runners.Items)
|
return len(runners.Items)
|
||||||
},
|
},
|
||||||
@@ -175,20 +196,28 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
|
|
||||||
Eventually(
|
Eventually(
|
||||||
func() int {
|
func() int {
|
||||||
err := k8sClient.List(ctx, &runners, client.InNamespace(ns.Name))
|
selector, err := metav1.LabelSelectorAsSelector(
|
||||||
|
&metav1.LabelSelector{
|
||||||
|
MatchLabels: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
logf.Log.Error(err, "failed to create labelselector")
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
err = k8sClient.List(
|
||||||
|
ctx,
|
||||||
|
&runners,
|
||||||
|
client.InNamespace(ns.Name),
|
||||||
|
client.MatchingLabelsSelector{Selector: selector},
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logf.Log.Error(err, "list runners")
|
logf.Log.Error(err, "list runners")
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, runner := range runners.Items {
|
runnersList.Sync(runners.Items)
|
||||||
runnersList.Add(&github.Runner{
|
|
||||||
ID: pointer.Int64Ptr(int64(i) + 1),
|
|
||||||
Name: pointer.StringPtr(runner.Name),
|
|
||||||
OS: pointer.StringPtr("linux"),
|
|
||||||
Status: pointer.StringPtr("online"),
|
|
||||||
Busy: pointer.BoolPtr(false),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return len(runners.Items)
|
return len(runners.Items)
|
||||||
},
|
},
|
||||||
@@ -216,21 +245,36 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
|
|
||||||
Eventually(
|
Eventually(
|
||||||
func() int {
|
func() int {
|
||||||
err := k8sClient.List(ctx, &runners, client.InNamespace(ns.Name))
|
selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
|
||||||
if err != nil {
|
MatchLabels: map[string]string{
|
||||||
logf.Log.Error(err, "list runners")
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
})
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
|
||||||
|
var regOnly actionsv1alpha1.Runner
|
||||||
|
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: registrationOnlyRunnerNameFor(name)}, ®Only); err != nil {
|
||||||
|
logf.Log.Info(fmt.Sprintf("Failed getting registration-only runner in test: %v", err))
|
||||||
|
return -1
|
||||||
|
} else {
|
||||||
|
updated := regOnly.DeepCopy()
|
||||||
|
updated.Status.Phase = "Completed"
|
||||||
|
|
||||||
|
if err := k8sClient.Status().Patch(ctx, updated, client.MergeFrom(®Only)); err != nil {
|
||||||
|
logf.Log.Info(fmt.Sprintf("Failed updating registration-only runner in test: %v", err))
|
||||||
|
return -1
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, runner := range runners.Items {
|
runnersList.AddOffline([]actionsv1alpha1.Runner{*updated})
|
||||||
runnersList.Add(&github.Runner{
|
|
||||||
ID: pointer.Int64Ptr(int64(i) + 1),
|
|
||||||
Name: pointer.StringPtr(runner.Name),
|
|
||||||
OS: pointer.StringPtr("linux"),
|
|
||||||
Status: pointer.StringPtr("online"),
|
|
||||||
Busy: pointer.BoolPtr(false),
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := k8sClient.List(ctx, &runners, client.InNamespace(ns.Name), client.MatchingLabelsSelector{Selector: selector}); err != nil {
|
||||||
|
logf.Log.Error(err, "list runners")
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
runnersList.Sync(runners.Items)
|
||||||
|
|
||||||
return len(runners.Items)
|
return len(runners.Items)
|
||||||
},
|
},
|
||||||
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(0))
|
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(0))
|
||||||
|
|||||||
122
controllers/schedule.go
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/teambition/rrule-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
type RecurrenceRule struct {
|
||||||
|
Frequency string
|
||||||
|
UntilTime time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
type Period struct {
|
||||||
|
StartTime time.Time
|
||||||
|
EndTime time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Period) String() string {
|
||||||
|
if r == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.StartTime.Format(time.RFC3339) + "-" + r.EndTime.Format(time.RFC3339)
|
||||||
|
}
|
||||||
|
|
||||||
|
func MatchSchedule(now time.Time, startTime, endTime time.Time, recurrenceRule RecurrenceRule) (*Period, *Period, error) {
|
||||||
|
return calculateActiveAndUpcomingRecurringPeriods(
|
||||||
|
now,
|
||||||
|
startTime,
|
||||||
|
endTime,
|
||||||
|
recurrenceRule.Frequency,
|
||||||
|
recurrenceRule.UntilTime,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func calculateActiveAndUpcomingRecurringPeriods(now, startTime, endTime time.Time, frequency string, untilTime time.Time) (*Period, *Period, error) {
|
||||||
|
var freqValue rrule.Frequency
|
||||||
|
|
||||||
|
var freqDurationDay int
|
||||||
|
var freqDurationMonth int
|
||||||
|
var freqDurationYear int
|
||||||
|
|
||||||
|
switch frequency {
|
||||||
|
case "Daily":
|
||||||
|
freqValue = rrule.DAILY
|
||||||
|
freqDurationDay = 1
|
||||||
|
case "Weekly":
|
||||||
|
freqValue = rrule.WEEKLY
|
||||||
|
freqDurationDay = 7
|
||||||
|
case "Monthly":
|
||||||
|
freqValue = rrule.MONTHLY
|
||||||
|
freqDurationMonth = 1
|
||||||
|
case "Yearly":
|
||||||
|
freqValue = rrule.YEARLY
|
||||||
|
freqDurationYear = 1
|
||||||
|
case "":
|
||||||
|
if now.Before(startTime) {
|
||||||
|
return nil, &Period{StartTime: startTime, EndTime: endTime}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if now.Before(endTime) {
|
||||||
|
return &Period{StartTime: startTime, EndTime: endTime}, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, nil, nil
|
||||||
|
default:
|
||||||
|
return nil, nil, fmt.Errorf(`invalid freq %q: It must be one of "Daily", "Weekly", "Monthly", and "Yearly"`, frequency)
|
||||||
|
}
|
||||||
|
|
||||||
|
freqDurationLater := time.Date(
|
||||||
|
now.Year()+freqDurationYear,
|
||||||
|
time.Month(int(now.Month())+freqDurationMonth),
|
||||||
|
now.Day()+freqDurationDay,
|
||||||
|
now.Hour(), now.Minute(), now.Second(), now.Nanosecond(), now.Location(),
|
||||||
|
)
|
||||||
|
|
||||||
|
freqDuration := freqDurationLater.Sub(now)
|
||||||
|
|
||||||
|
overrideDuration := endTime.Sub(startTime)
|
||||||
|
if overrideDuration > freqDuration {
|
||||||
|
return nil, nil, fmt.Errorf("override's duration %s must be equal to sor shorter than the duration implied by freq %q (%s)", overrideDuration, frequency, freqDuration)
|
||||||
|
}
|
||||||
|
|
||||||
|
rrule, err := rrule.NewRRule(rrule.ROption{
|
||||||
|
Freq: freqValue,
|
||||||
|
Dtstart: startTime,
|
||||||
|
Until: untilTime,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
overrideDurationBefore := now.Add(-overrideDuration + 1)
|
||||||
|
activeOverrideStarts := rrule.Between(overrideDurationBefore, now, true)
|
||||||
|
|
||||||
|
var active *Period
|
||||||
|
|
||||||
|
if len(activeOverrideStarts) > 1 {
|
||||||
|
return nil, nil, fmt.Errorf("[bug] unexpted number of active overrides found: %v", activeOverrideStarts)
|
||||||
|
} else if len(activeOverrideStarts) == 1 {
|
||||||
|
active = &Period{
|
||||||
|
StartTime: activeOverrideStarts[0],
|
||||||
|
EndTime: activeOverrideStarts[0].Add(overrideDuration),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
oneSecondLater := now.Add(1)
|
||||||
|
upcomingOverrideStarts := rrule.Between(oneSecondLater, freqDurationLater, true)
|
||||||
|
|
||||||
|
var next *Period
|
||||||
|
|
||||||
|
if len(upcomingOverrideStarts) > 0 {
|
||||||
|
next = &Period{
|
||||||
|
StartTime: upcomingOverrideStarts[0],
|
||||||
|
EndTime: upcomingOverrideStarts[0].Add(overrideDuration),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return active, next, nil
|
||||||
|
}
|
||||||
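controllers/schedule.go above exposes MatchSchedule, which reports the currently active Period (if any) and the next upcoming one for a window that may recur Daily, Weekly, Monthly or Yearly until an optional cut-off. A short usage sketch, written as if it lived in the same controllers package; the dates mirror the weekly cases in the test file that follows:

package controllers

import (
	"fmt"
	"time"
)

// sketchMatchSchedule evaluates a 2-day window starting 2021-05-01 JST that
// repeats weekly until 2022-05-01, observed one week after its first occurrence.
func sketchMatchSchedule() error {
	jst := time.FixedZone("JST", 9*60*60)

	start := time.Date(2021, 5, 1, 0, 0, 0, 0, jst)
	end := time.Date(2021, 5, 3, 0, 0, 0, 0, jst)
	until := time.Date(2022, 5, 1, 0, 0, 0, 0, jst)
	now := time.Date(2021, 5, 8, 0, 0, 0, 0, jst)

	active, upcoming, err := MatchSchedule(now, start, end, RecurrenceRule{
		Frequency: "Weekly",
		UntilTime: until,
	})
	if err != nil {
		return err
	}

	// Per the tests below: active covers 2021-05-08..2021-05-10 and upcoming
	// covers 2021-05-15..2021-05-17.
	fmt.Println(active, upcoming)

	return nil
}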
607
controllers/schedule_test.go
Normal file
@@ -0,0 +1,607 @@
|
|||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCalculateActiveAndUpcomingRecurringPeriods(t *testing.T) {
|
||||||
|
type recurrence struct {
|
||||||
|
Start string
|
||||||
|
End string
|
||||||
|
Freq string
|
||||||
|
Until string
|
||||||
|
}
|
||||||
|
|
||||||
|
type testcase struct {
|
||||||
|
now string
|
||||||
|
|
||||||
|
recurrence recurrence
|
||||||
|
|
||||||
|
wantActive string
|
||||||
|
wantUpcoming string
|
||||||
|
}
|
||||||
|
|
||||||
|
check := func(t *testing.T, tc testcase) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
_, err := time.Parse(time.RFC3339, "2021-05-08T00:00:00Z")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
now, err := time.Parse(time.RFC3339, tc.now)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
active, upcoming, err := parseAndMatchRecurringPeriod(now, tc.recurrence.Start, tc.recurrence.End, tc.recurrence.Freq, tc.recurrence.Until)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if active.String() != tc.wantActive {
|
||||||
|
t.Errorf("unexpected active: want %q, got %q", tc.wantActive, active)
|
||||||
|
}
|
||||||
|
|
||||||
|
if upcoming.String() != tc.wantUpcoming {
|
||||||
|
t.Errorf("unexpected upcoming: want %q, got %q", tc.wantUpcoming, upcoming)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("onetime override about to start", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2021-04-30T23:59:59+09:00",
|
||||||
|
|
||||||
|
wantActive: "",
|
||||||
|
wantUpcoming: "2021-05-01T00:00:00+09:00-2021-05-03T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("onetime override started", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2021-05-01T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "2021-05-01T00:00:00+09:00-2021-05-03T00:00:00+09:00",
|
||||||
|
wantUpcoming: "",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("onetime override about to end", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2021-05-02T23:59:59+09:00",
|
||||||
|
|
||||||
|
wantActive: "2021-05-01T00:00:00+09:00-2021-05-03T00:00:00+09:00",
|
||||||
|
wantUpcoming: "",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("onetime override ended", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2021-05-03T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "",
|
||||||
|
wantUpcoming: "",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("weekly override about to start", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Weekly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2021-04-30T23:59:59+09:00",
|
||||||
|
|
||||||
|
wantActive: "",
|
||||||
|
wantUpcoming: "2021-05-01T00:00:00+09:00-2021-05-03T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("weekly override started", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Weekly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2021-05-01T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "2021-05-01T00:00:00+09:00-2021-05-03T00:00:00+09:00",
|
||||||
|
wantUpcoming: "2021-05-08T00:00:00+09:00-2021-05-10T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("weekly override about to end", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Weekly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2021-05-02T23:59:59+09:00",
|
||||||
|
|
||||||
|
wantActive: "2021-05-01T00:00:00+09:00-2021-05-03T00:00:00+09:00",
|
||||||
|
wantUpcoming: "2021-05-08T00:00:00+09:00-2021-05-10T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("weekly override ended", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Weekly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2021-05-03T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "",
|
||||||
|
wantUpcoming: "2021-05-08T00:00:00+09:00-2021-05-10T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("weekly override reccurrence about to start", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Weekly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2021-05-07T23:59:59+09:00",
|
||||||
|
|
||||||
|
wantActive: "",
|
||||||
|
wantUpcoming: "2021-05-08T00:00:00+09:00-2021-05-10T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("weekly override reccurrence started", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Weekly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2021-05-08T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "2021-05-08T00:00:00+09:00-2021-05-10T00:00:00+09:00",
|
||||||
|
wantUpcoming: "2021-05-15T00:00:00+09:00-2021-05-17T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("weekly override reccurrence about to end", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Weekly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2021-05-09T23:59:59+09:00",
|
||||||
|
|
||||||
|
wantActive: "2021-05-08T00:00:00+09:00-2021-05-10T00:00:00+09:00",
|
||||||
|
wantUpcoming: "2021-05-15T00:00:00+09:00-2021-05-17T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("weekly override reccurrence ended", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Weekly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2021-05-10T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "",
|
||||||
|
wantUpcoming: "2021-05-15T00:00:00+09:00-2021-05-17T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("weekly override's last reccurrence about to start", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Weekly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2022-04-29T23:59:59+09:00",
|
||||||
|
|
||||||
|
wantActive: "",
|
||||||
|
wantUpcoming: "2022-04-30T00:00:00+09:00-2022-05-02T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("weekly override reccurrence started", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Weekly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2022-04-30T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "2022-04-30T00:00:00+09:00-2022-05-02T00:00:00+09:00",
|
||||||
|
wantUpcoming: "",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("weekly override reccurrence about to end", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Weekly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2022-05-01T23:59:59+09:00",
|
||||||
|
|
||||||
|
wantActive: "2022-04-30T00:00:00+09:00-2022-05-02T00:00:00+09:00",
|
||||||
|
wantUpcoming: "",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("weekly override reccurrence ended", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Weekly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2022-05-02T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "",
|
||||||
|
wantUpcoming: "",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("weekly override repeated forever started", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Weekly",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2021-05-08T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "2021-05-08T00:00:00+09:00-2021-05-10T00:00:00+09:00",
|
||||||
|
wantUpcoming: "2021-05-15T00:00:00+09:00-2021-05-17T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("monthly override started", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Monthly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2021-05-01T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "2021-05-01T00:00:00+09:00-2021-05-03T00:00:00+09:00",
|
||||||
|
wantUpcoming: "2021-06-01T00:00:00+09:00-2021-06-03T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("monthly override recurrence started", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Monthly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2021-06-01T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "2021-06-01T00:00:00+09:00-2021-06-03T00:00:00+09:00",
|
||||||
|
wantUpcoming: "2021-07-01T00:00:00+09:00-2021-07-03T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("monthly override's last reccurence about to start", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Monthly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2022-04-30T23:59:59+09:00",
|
||||||
|
|
||||||
|
wantActive: "",
|
||||||
|
wantUpcoming: "2022-05-01T00:00:00+09:00-2022-05-03T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("monthly override's last reccurence started", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Monthly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2022-05-01T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "2022-05-01T00:00:00+09:00-2022-05-03T00:00:00+09:00",
|
||||||
|
wantUpcoming: "",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("monthly override's last reccurence started", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Monthly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2022-05-01T00:00:01+09:00",
|
||||||
|
|
||||||
|
wantActive: "2022-05-01T00:00:00+09:00-2022-05-03T00:00:00+09:00",
|
||||||
|
wantUpcoming: "",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("monthly override's last reccurence ending", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Monthly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2022-05-02T23:59:59+09:00",
|
||||||
|
|
||||||
|
wantActive: "2022-05-01T00:00:00+09:00-2022-05-03T00:00:00+09:00",
|
||||||
|
wantUpcoming: "",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("monthly override's last reccurence ended", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Monthly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2022-05-03T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "",
|
||||||
|
wantUpcoming: "",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("yearly override started", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Yearly",
|
||||||
|
Until: "2022-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2021-05-01T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "2021-05-01T00:00:00+09:00-2021-05-03T00:00:00+09:00",
|
||||||
|
wantUpcoming: "2022-05-01T00:00:00+09:00-2022-05-03T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("yearly override reccurrence started", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Yearly",
|
||||||
|
Until: "2023-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2022-05-01T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "2022-05-01T00:00:00+09:00-2022-05-03T00:00:00+09:00",
|
||||||
|
wantUpcoming: "2023-05-01T00:00:00+09:00-2023-05-03T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("yearly override's last recurrence about to start", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Yearly",
|
||||||
|
Until: "2023-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2023-04-30T23:59:59+09:00",
|
||||||
|
|
||||||
|
wantActive: "",
|
||||||
|
wantUpcoming: "2023-05-01T00:00:00+09:00-2023-05-03T00:00:00+09:00",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("yearly override's last recurrence started", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Yearly",
|
||||||
|
Until: "2023-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2023-05-01T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "2023-05-01T00:00:00+09:00-2023-05-03T00:00:00+09:00",
|
||||||
|
wantUpcoming: "",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("yearly override's last recurrence ending", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Yearly",
|
||||||
|
Until: "2023-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2023-05-02T23:23:59+09:00",
|
||||||
|
|
||||||
|
wantActive: "2023-05-01T00:00:00+09:00-2023-05-03T00:00:00+09:00",
|
||||||
|
wantUpcoming: "",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("yearly override's last recurrence ended", func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
check(t, testcase{
|
||||||
|
recurrence: recurrence{
|
||||||
|
Start: "2021-05-01T00:00:00+09:00",
|
||||||
|
End: "2021-05-03T00:00:00+09:00",
|
||||||
|
Freq: "Yearly",
|
||||||
|
Until: "2023-05-01T00:00:00+09:00",
|
||||||
|
},
|
||||||
|
|
||||||
|
now: "2023-05-03T00:00:00+09:00",
|
||||||
|
|
||||||
|
wantActive: "",
|
||||||
|
wantUpcoming: "",
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseAndMatchRecurringPeriod(now time.Time, start, end, frequency, until string) (*Period, *Period, error) {
|
||||||
|
startTime, err := time.Parse(time.RFC3339, start)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
endTime, err := time.Parse(time.RFC3339, end)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var untilTime time.Time
|
||||||
|
|
||||||
|
if until != "" {
|
||||||
|
ut, err := time.Parse(time.RFC3339, until)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
untilTime = ut
|
||||||
|
}
|
||||||
|
|
||||||
|
return MatchSchedule(now, startTime, endTime, RecurrenceRule{Frequency: frequency, UntilTime: untilTime})
|
||||||
|
}
|
||||||
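For orientation, the helper above only parses RFC 3339 timestamps and delegates to MatchSchedule. A direct call corresponding to the "yearly override recurrence started" case would look roughly like the sketch below; the exact fields of the returned Period values are not shown in this excerpt, so the comments only restate the want strings from the test case.

	active, upcoming, err := parseAndMatchRecurringPeriod(
		time.Date(2022, 5, 1, 0, 0, 0, 0, time.FixedZone("JST", 9*60*60)), // "now"
		"2021-05-01T00:00:00+09:00",                                       // Start
		"2021-05-03T00:00:00+09:00",                                       // End
		"Yearly",                                                          // Freq
		"2023-05-01T00:00:00+09:00",                                       // Until
	)
	// err is expected to be nil; active should cover 2022-05-01..2022-05-03 and
	// upcoming should cover 2023-05-01..2023-05-03, matching wantActive/wantUpcoming above.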
@@ -17,6 +17,8 @@ limitations under the License.
 package controllers

 import (
+	"github.com/onsi/ginkgo/config"
+	"os"
 	"path/filepath"
 	"testing"

@@ -43,6 +45,8 @@ var testEnv *envtest.Environment
 func TestAPIs(t *testing.T) {
 	RegisterFailHandler(Fail)

+	config.GinkgoConfig.FocusString = os.Getenv("GINKGO_FOCUS")
+
 	RunSpecsWithDefaultAndCustomReporters(t,
 		"Controller Suite",
 		[]Reporter{envtest.NewlineReporter{}})
@@ -51,9 +55,17 @@ func TestAPIs(t *testing.T) {
 var _ = BeforeSuite(func(done Done) {
 	logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))

+	var apiServerFlags []string
+
+	apiServerFlags = append(apiServerFlags, envtest.DefaultKubeAPIServerFlags...)
+	// Avoids the following error:
+	// 2021-03-19T15:14:11.673+0900 ERROR controller-runtime.controller Reconciler error {"controller": "testns-tvjzjrunner", "request": "testns-gdnyx/example-runnerdeploy-zps4z-j5562", "error": "Pod \"example-runnerdeploy-zps4z-j5562\" is invalid: [spec.containers[1].image: Required value, spec.containers[1].securityContext.privileged: Forbidden: disallowed by cluster policy]"}
+	apiServerFlags = append(apiServerFlags, "--allow-privileged=true")
+
 	By("bootstrapping test environment")
 	testEnv = &envtest.Environment{
 		CRDDirectoryPaths:  []string{filepath.Join("..", "config", "crd", "bases")},
+		KubeAPIServerFlags: apiServerFlags,
 	}

 	var err error
controllers/testdata/org_webhook_check_run_payload.json (vendored, new file, 373 lines)
@@ -0,0 +1,373 @@
|
{
|
||||||
|
"action": "created",
|
||||||
|
"check_run": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"head_sha": "1234567890123456789012345678901234567890",
|
||||||
|
"external_id": "92058b04-f16a-5035-546c-cae3ad5e2f5f",
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO/check-runs/123467890",
|
||||||
|
"html_url": "https://github.com/MYORG/MYREPO/runs/123467890",
|
||||||
|
"details_url": "https://github.com/MYORG/MYREPO/runs/123467890",
|
||||||
|
"status": "queued",
|
||||||
|
"conclusion": null,
|
||||||
|
"started_at": "2021-02-18T06:16:31Z",
|
||||||
|
"completed_at": null,
|
||||||
|
"output": {
|
||||||
|
"title": null,
|
||||||
|
"summary": null,
|
||||||
|
"text": null,
|
||||||
|
"annotations_count": 0,
|
||||||
|
"annotations_url": "https://api.github.com/repos/MYORG/MYREPO/check-runs/123467890/annotations"
|
||||||
|
},
|
||||||
|
"name": "validate",
|
||||||
|
"check_suite": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"head_branch": "MYNAME/actions-runner-controller-webhook",
|
||||||
|
"head_sha": "1234567890123456789012345678901234567890",
|
||||||
|
"status": "queued",
|
||||||
|
"conclusion": null,
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO/check-suites/1234567890",
|
||||||
|
"before": "1234567890123456789012345678901234567890",
|
||||||
|
"after": "1234567890123456789012345678901234567890",
|
||||||
|
"pull_requests": [
|
||||||
|
{
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO/pulls/2033",
|
||||||
|
"id": 1234567890,
|
||||||
|
"number": 1234567890,
|
||||||
|
"head": {
|
||||||
|
"ref": "feature",
|
||||||
|
"sha": "1234567890123456789012345678901234567890",
|
||||||
|
"repo": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO",
|
||||||
|
"name": "MYREPO"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"base": {
|
||||||
|
"ref": "master",
|
||||||
|
"sha": "1234567890123456789012345678901234567890",
|
||||||
|
"repo": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO",
|
||||||
|
"name": "MYREPO"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"app": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"slug": "github-actions",
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"owner": {
|
||||||
|
"login": "github",
|
||||||
|
"id": 1234567890,
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"avatar_url": "https://avatars.githubusercontent.com/u/123467890?v=4",
|
||||||
|
"gravatar_id": "",
|
||||||
|
"url": "https://api.github.com/users/github",
|
||||||
|
"html_url": "https://github.com/github",
|
||||||
|
"followers_url": "https://api.github.com/users/github/followers",
|
||||||
|
"following_url": "https://api.github.com/users/github/following{/other_user}",
|
||||||
|
"gists_url": "https://api.github.com/users/github/gists{/gist_id}",
|
||||||
|
"starred_url": "https://api.github.com/users/github/starred{/owner}{/repo}",
|
||||||
|
"subscriptions_url": "https://api.github.com/users/github/subscriptions",
|
||||||
|
"organizations_url": "https://api.github.com/users/github/orgs",
|
||||||
|
"repos_url": "https://api.github.com/users/github/repos",
|
||||||
|
"events_url": "https://api.github.com/users/github/events{/privacy}",
|
||||||
|
"received_events_url": "https://api.github.com/users/github/received_events",
|
||||||
|
"type": "Organization",
|
||||||
|
"site_admin": false
|
||||||
|
},
|
||||||
|
"name": "GitHub Actions",
|
||||||
|
"description": "Automate your workflow from idea to production",
|
||||||
|
"external_url": "https://help.github.com/en/actions",
|
||||||
|
"html_url": "https://github.com/apps/github-actions",
|
||||||
|
"created_at": "2018-07-30T09:30:17Z",
|
||||||
|
"updated_at": "2019-12-10T19:04:12Z",
|
||||||
|
"permissions": {
|
||||||
|
"actions": "write",
|
||||||
|
"checks": "write",
|
||||||
|
"contents": "write",
|
||||||
|
"deployments": "write",
|
||||||
|
"issues": "write",
|
||||||
|
"metadata": "read",
|
||||||
|
"organization_packages": "write",
|
||||||
|
"packages": "write",
|
||||||
|
"pages": "write",
|
||||||
|
"pull_requests": "write",
|
||||||
|
"repository_hooks": "write",
|
||||||
|
"repository_projects": "write",
|
||||||
|
"security_events": "write",
|
||||||
|
"statuses": "write",
|
||||||
|
"vulnerability_alerts": "read"
|
||||||
|
},
|
||||||
|
"events": [
|
||||||
|
"check_run",
|
||||||
|
"check_suite",
|
||||||
|
"create",
|
||||||
|
"delete",
|
||||||
|
"deployment",
|
||||||
|
"deployment_status",
|
||||||
|
"fork",
|
||||||
|
"gollum",
|
||||||
|
"issues",
|
||||||
|
"issue_comment",
|
||||||
|
"label",
|
||||||
|
"milestone",
|
||||||
|
"page_build",
|
||||||
|
"project",
|
||||||
|
"project_card",
|
||||||
|
"project_column",
|
||||||
|
"public",
|
||||||
|
"pull_request",
|
||||||
|
"pull_request_review",
|
||||||
|
"pull_request_review_comment",
|
||||||
|
"push",
|
||||||
|
"registry_package",
|
||||||
|
"release",
|
||||||
|
"repository",
|
||||||
|
"repository_dispatch",
|
||||||
|
"status",
|
||||||
|
"watch",
|
||||||
|
"workflow_dispatch",
|
||||||
|
"workflow_run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"created_at": "2021-02-18T06:15:32Z",
|
||||||
|
"updated_at": "2021-02-18T06:16:31Z"
|
||||||
|
},
|
||||||
|
"app": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"slug": "github-actions",
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"owner": {
|
||||||
|
"login": "github",
|
||||||
|
"id": 1234567890,
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"avatar_url": "https://avatars.githubusercontent.com/u/1234567890?v=4",
|
||||||
|
"gravatar_id": "",
|
||||||
|
"url": "https://api.github.com/users/github",
|
||||||
|
"html_url": "https://github.com/github",
|
||||||
|
"followers_url": "https://api.github.com/users/github/followers",
|
||||||
|
"following_url": "https://api.github.com/users/github/following{/other_user}",
|
||||||
|
"gists_url": "https://api.github.com/users/github/gists{/gist_id}",
|
||||||
|
"starred_url": "https://api.github.com/users/github/starred{/owner}{/repo}",
|
||||||
|
"subscriptions_url": "https://api.github.com/users/github/subscriptions",
|
||||||
|
"organizations_url": "https://api.github.com/users/github/orgs",
|
||||||
|
"repos_url": "https://api.github.com/users/github/repos",
|
||||||
|
"events_url": "https://api.github.com/users/github/events{/privacy}",
|
||||||
|
"received_events_url": "https://api.github.com/users/github/received_events",
|
||||||
|
"type": "Organization",
|
||||||
|
"site_admin": false
|
||||||
|
},
|
||||||
|
"name": "GitHub Actions",
|
||||||
|
"description": "Automate your workflow from idea to production",
|
||||||
|
"external_url": "https://help.github.com/en/actions",
|
||||||
|
"html_url": "https://github.com/apps/github-actions",
|
||||||
|
"created_at": "2018-07-30T09:30:17Z",
|
||||||
|
"updated_at": "2019-12-10T19:04:12Z",
|
||||||
|
"permissions": {
|
||||||
|
"actions": "write",
|
||||||
|
"checks": "write",
|
||||||
|
"contents": "write",
|
||||||
|
"deployments": "write",
|
||||||
|
"issues": "write",
|
||||||
|
"metadata": "read",
|
||||||
|
"organization_packages": "write",
|
||||||
|
"packages": "write",
|
||||||
|
"pages": "write",
|
||||||
|
"pull_requests": "write",
|
||||||
|
"repository_hooks": "write",
|
||||||
|
"repository_projects": "write",
|
||||||
|
"security_events": "write",
|
||||||
|
"statuses": "write",
|
||||||
|
"vulnerability_alerts": "read"
|
||||||
|
},
|
||||||
|
"events": [
|
||||||
|
"check_run",
|
||||||
|
"check_suite",
|
||||||
|
"create",
|
||||||
|
"delete",
|
||||||
|
"deployment",
|
||||||
|
"deployment_status",
|
||||||
|
"fork",
|
||||||
|
"gollum",
|
||||||
|
"issues",
|
||||||
|
"issue_comment",
|
||||||
|
"label",
|
||||||
|
"milestone",
|
||||||
|
"page_build",
|
||||||
|
"project",
|
||||||
|
"project_card",
|
||||||
|
"project_column",
|
||||||
|
"public",
|
||||||
|
"pull_request",
|
||||||
|
"pull_request_review",
|
||||||
|
"pull_request_review_comment",
|
||||||
|
"push",
|
||||||
|
"registry_package",
|
||||||
|
"release",
|
||||||
|
"repository",
|
||||||
|
"repository_dispatch",
|
||||||
|
"status",
|
||||||
|
"watch",
|
||||||
|
"workflow_dispatch",
|
||||||
|
"workflow_run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"pull_requests": [
|
||||||
|
{
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO/pulls/1234567890",
|
||||||
|
"id": 1234567890,
|
||||||
|
"number": 1234567890,
|
||||||
|
"head": {
|
||||||
|
"ref": "feature",
|
||||||
|
"sha": "1234567890123456789012345678901234567890",
|
||||||
|
"repo": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO",
|
||||||
|
"name": "MYREPO"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"base": {
|
||||||
|
"ref": "master",
|
||||||
|
"sha": "1234567890123456789012345678901234567890",
|
||||||
|
"repo": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO",
|
||||||
|
"name": "MYREPO"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"repository": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"name": "MYREPO",
|
||||||
|
"full_name": "MYORG/MYREPO",
|
||||||
|
"private": true,
|
||||||
|
"owner": {
|
||||||
|
"login": "MYORG",
|
||||||
|
"id": 1234567890,
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"avatar_url": "https://avatars.githubusercontent.com/u/1234567890?v=4",
|
||||||
|
"gravatar_id": "",
|
||||||
|
"url": "https://api.github.com/users/MYORG",
|
||||||
|
"html_url": "https://github.com/MYORG",
|
||||||
|
"followers_url": "https://api.github.com/users/MYORG/followers",
|
||||||
|
"following_url": "https://api.github.com/users/MYORG/following{/other_user}",
|
||||||
|
"gists_url": "https://api.github.com/users/MYORG/gists{/gist_id}",
|
||||||
|
"starred_url": "https://api.github.com/users/MYORG/starred{/owner}{/repo}",
|
||||||
|
"subscriptions_url": "https://api.github.com/users/MYORG/subscriptions",
|
||||||
|
"organizations_url": "https://api.github.com/users/MYORG/orgs",
|
||||||
|
"repos_url": "https://api.github.com/users/MYORG/repos",
|
||||||
|
"events_url": "https://api.github.com/users/MYORG/events{/privacy}",
|
||||||
|
"received_events_url": "https://api.github.com/users/MYORG/received_events",
|
||||||
|
"type": "Organization",
|
||||||
|
"site_admin": false
|
||||||
|
},
|
||||||
|
"html_url": "https://github.com/MYORG/MYREPO",
|
||||||
|
"description": "MYREPO",
|
||||||
|
"fork": false,
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO",
|
||||||
|
"forks_url": "https://api.github.com/repos/MYORG/MYREPO/forks",
|
||||||
|
"keys_url": "https://api.github.com/repos/MYORG/MYREPO/keys{/key_id}",
|
||||||
|
"collaborators_url": "https://api.github.com/repos/MYORG/MYREPO/collaborators{/collaborator}",
|
||||||
|
"teams_url": "https://api.github.com/repos/MYORG/MYREPO/teams",
|
||||||
|
"hooks_url": "https://api.github.com/repos/MYORG/MYREPO/hooks",
|
||||||
|
"issue_events_url": "https://api.github.com/repos/MYORG/MYREPO/issues/events{/number}",
|
||||||
|
"events_url": "https://api.github.com/repos/MYORG/MYREPO/events",
|
||||||
|
"assignees_url": "https://api.github.com/repos/MYORG/MYREPO/assignees{/user}",
|
||||||
|
"branches_url": "https://api.github.com/repos/MYORG/MYREPO/branches{/branch}",
|
||||||
|
"tags_url": "https://api.github.com/repos/MYORG/MYREPO/tags",
|
||||||
|
"blobs_url": "https://api.github.com/repos/MYORG/MYREPO/git/blobs{/sha}",
|
||||||
|
"git_tags_url": "https://api.github.com/repos/MYORG/MYREPO/git/tags{/sha}",
|
||||||
|
"git_refs_url": "https://api.github.com/repos/MYORG/MYREPO/git/refs{/sha}",
|
||||||
|
"trees_url": "https://api.github.com/repos/MYORG/MYREPO/git/trees{/sha}",
|
||||||
|
"statuses_url": "https://api.github.com/repos/MYORG/MYREPO/statuses/{sha}",
|
||||||
|
"languages_url": "https://api.github.com/repos/MYORG/MYREPO/languages",
|
||||||
|
"stargazers_url": "https://api.github.com/repos/MYORG/MYREPO/stargazers",
|
||||||
|
"contributors_url": "https://api.github.com/repos/MYORG/MYREPO/contributors",
|
||||||
|
"subscribers_url": "https://api.github.com/repos/MYORG/MYREPO/subscribers",
|
||||||
|
"subscription_url": "https://api.github.com/repos/MYORG/MYREPO/subscription",
|
||||||
|
"commits_url": "https://api.github.com/repos/MYORG/MYREPO/commits{/sha}",
|
||||||
|
"git_commits_url": "https://api.github.com/repos/MYORG/MYREPO/git/commits{/sha}",
|
||||||
|
"comments_url": "https://api.github.com/repos/MYORG/MYREPO/comments{/number}",
|
||||||
|
"issue_comment_url": "https://api.github.com/repos/MYORG/MYREPO/issues/comments{/number}",
|
||||||
|
"contents_url": "https://api.github.com/repos/MYORG/MYREPO/contents/{+path}",
|
||||||
|
"compare_url": "https://api.github.com/repos/MYORG/MYREPO/compare/{base}...{head}",
|
||||||
|
"merges_url": "https://api.github.com/repos/MYORG/MYREPO/merges",
|
||||||
|
"archive_url": "https://api.github.com/repos/MYORG/MYREPO/{archive_format}{/ref}",
|
||||||
|
"downloads_url": "https://api.github.com/repos/MYORG/MYREPO/downloads",
|
||||||
|
"issues_url": "https://api.github.com/repos/MYORG/MYREPO/issues{/number}",
|
||||||
|
"pulls_url": "https://api.github.com/repos/MYORG/MYREPO/pulls{/number}",
|
||||||
|
"milestones_url": "https://api.github.com/repos/MYORG/MYREPO/milestones{/number}",
|
||||||
|
"notifications_url": "https://api.github.com/repos/MYORG/MYREPO/notifications{?since,all,participating}",
|
||||||
|
"labels_url": "https://api.github.com/repos/MYORG/MYREPO/labels{/name}",
|
||||||
|
"releases_url": "https://api.github.com/repos/MYORG/MYREPO/releases{/id}",
|
||||||
|
"deployments_url": "https://api.github.com/repos/MYORG/MYREPO/deployments",
|
||||||
|
"created_at": "2017-08-10T02:21:10Z",
|
||||||
|
"updated_at": "2021-02-18T04:40:55Z",
|
||||||
|
"pushed_at": "2021-02-18T06:15:30Z",
|
||||||
|
"git_url": "git://github.com/MYORG/MYREPO.git",
|
||||||
|
"ssh_url": "git@github.com:MYORG/MYREPO.git",
|
||||||
|
"clone_url": "https://github.com/MYORG/MYREPO.git",
|
||||||
|
"svn_url": "https://github.com/MYORG/MYREPO",
|
||||||
|
"homepage": null,
|
||||||
|
"size": 30782,
|
||||||
|
"stargazers_count": 2,
|
||||||
|
"watchers_count": 2,
|
||||||
|
"language": "Shell",
|
||||||
|
"has_issues": false,
|
||||||
|
"has_projects": true,
|
||||||
|
"has_downloads": true,
|
||||||
|
"has_wiki": false,
|
||||||
|
"has_pages": false,
|
||||||
|
"forks_count": 0,
|
||||||
|
"mirror_url": null,
|
||||||
|
"archived": false,
|
||||||
|
"disabled": false,
|
||||||
|
"open_issues_count": 6,
|
||||||
|
"license": null,
|
||||||
|
"forks": 0,
|
||||||
|
"open_issues": 6,
|
||||||
|
"watchers": 2,
|
||||||
|
"default_branch": "master"
|
||||||
|
},
|
||||||
|
"organization": {
|
||||||
|
"login": "MYORG",
|
||||||
|
"id": 1234567890,
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"url": "https://api.github.com/orgs/MYORG",
|
||||||
|
"repos_url": "https://api.github.com/orgs/MYORG/repos",
|
||||||
|
"events_url": "https://api.github.com/orgs/MYORG/events",
|
||||||
|
"hooks_url": "https://api.github.com/orgs/MYORG/hooks",
|
||||||
|
"issues_url": "https://api.github.com/orgs/MYORG/issues",
|
||||||
|
"members_url": "https://api.github.com/orgs/MYORG/members{/member}",
|
||||||
|
"public_members_url": "https://api.github.com/orgs/MYORG/public_members{/member}",
|
||||||
|
"avatar_url": "https://avatars.githubusercontent.com/u/1234567890?v=4",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
"sender": {
|
||||||
|
"login": "MYNAME",
|
||||||
|
"id": 1234567890,
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"avatar_url": "https://avatars.githubusercontent.com/u/1234567890?v=4",
|
||||||
|
"gravatar_id": "",
|
||||||
|
"url": "https://api.github.com/users/MYNAME",
|
||||||
|
"html_url": "https://github.com/MYNAME",
|
||||||
|
"followers_url": "https://api.github.com/users/MYNAME/followers",
|
||||||
|
"following_url": "https://api.github.com/users/MYNAME/following{/other_user}",
|
||||||
|
"gists_url": "https://api.github.com/users/MYNAME/gists{/gist_id}",
|
||||||
|
"starred_url": "https://api.github.com/users/MYNAME/starred{/owner}{/repo}",
|
||||||
|
"subscriptions_url": "https://api.github.com/users/MYNAME/subscriptions",
|
||||||
|
"organizations_url": "https://api.github.com/users/MYNAME/orgs",
|
||||||
|
"repos_url": "https://api.github.com/users/MYNAME/repos",
|
||||||
|
"events_url": "https://api.github.com/users/MYNAME/events{/privacy}",
|
||||||
|
"received_events_url": "https://api.github.com/users/MYNAME/received_events",
|
||||||
|
"type": "User",
|
||||||
|
"site_admin": false
|
||||||
|
}
|
||||||
|
}
|
||||||
controllers/testdata/repo_webhook_check_run_payload.json (vendored, new file, 360 lines)
@@ -0,0 +1,360 @@
|
{
|
||||||
|
"action": "completed",
|
||||||
|
"check_run": {
|
||||||
|
"id": 1949438388,
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"head_sha": "1234567890123456789012345678901234567890",
|
||||||
|
"external_id": "ca395085-040a-526b-2ce8-bdc85f692774",
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO/check-runs/123467890",
|
||||||
|
"html_url": "https://github.com/MYORG/MYREPO/runs/123467890",
|
||||||
|
"details_url": "https://github.com/MYORG/MYREPO/runs/123467890",
|
||||||
|
"status": "queued",
|
||||||
|
"conclusion": null,
|
||||||
|
"started_at": "2021-02-18T06:16:31Z",
|
||||||
|
"completed_at": null,
|
||||||
|
"output": {
|
||||||
|
"title": null,
|
||||||
|
"summary": null,
|
||||||
|
"text": null,
|
||||||
|
"annotations_count": 0,
|
||||||
|
"annotations_url": "https://api.github.com/repos/MYORG/MYREPO/check-runs/123467890/annotations"
|
||||||
|
},
|
||||||
|
"name": "validate",
|
||||||
|
"check_suite": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"head_branch": "MYNAME/actions-runner-controller-webhook",
|
||||||
|
"head_sha": "1234567890123456789012345678901234567890",
|
||||||
|
"status": "queued",
|
||||||
|
"conclusion": null,
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO/check-suites/1234567890",
|
||||||
|
"before": "1234567890123456789012345678901234567890",
|
||||||
|
"after": "1234567890123456789012345678901234567890",
|
||||||
|
"pull_requests": [
|
||||||
|
{
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO/pulls/2033",
|
||||||
|
"id": 1234567890,
|
||||||
|
"number": 1234567890,
|
||||||
|
"head": {
|
||||||
|
"ref": "feature",
|
||||||
|
"sha": "1234567890123456789012345678901234567890",
|
||||||
|
"repo": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO",
|
||||||
|
"name": "MYREPO"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"base": {
|
||||||
|
"ref": "master",
|
||||||
|
"sha": "1234567890123456789012345678901234567890",
|
||||||
|
"repo": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO",
|
||||||
|
"name": "MYREPO"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"app": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"slug": "github-actions",
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"owner": {
|
||||||
|
"login": "github",
|
||||||
|
"id": 1234567890,
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"avatar_url": "https://avatars.githubusercontent.com/u/123467890?v=4",
|
||||||
|
"gravatar_id": "",
|
||||||
|
"url": "https://api.github.com/users/github",
|
||||||
|
"html_url": "https://github.com/github",
|
||||||
|
"followers_url": "https://api.github.com/users/github/followers",
|
||||||
|
"following_url": "https://api.github.com/users/github/following{/other_user}",
|
||||||
|
"gists_url": "https://api.github.com/users/github/gists{/gist_id}",
|
||||||
|
"starred_url": "https://api.github.com/users/github/starred{/owner}{/repo}",
|
||||||
|
"subscriptions_url": "https://api.github.com/users/github/subscriptions",
|
||||||
|
"organizations_url": "https://api.github.com/users/github/orgs",
|
||||||
|
"repos_url": "https://api.github.com/users/github/repos",
|
||||||
|
"events_url": "https://api.github.com/users/github/events{/privacy}",
|
||||||
|
"received_events_url": "https://api.github.com/users/github/received_events",
|
||||||
|
"type": "Organization",
|
||||||
|
"site_admin": false
|
||||||
|
},
|
||||||
|
"name": "GitHub Actions",
|
||||||
|
"description": "Automate your workflow from idea to production",
|
||||||
|
"external_url": "https://help.github.com/en/actions",
|
||||||
|
"html_url": "https://github.com/apps/github-actions",
|
||||||
|
"created_at": "2018-07-30T09:30:17Z",
|
||||||
|
"updated_at": "2019-12-10T19:04:12Z",
|
||||||
|
"permissions": {
|
||||||
|
"actions": "write",
|
||||||
|
"checks": "write",
|
||||||
|
"contents": "write",
|
||||||
|
"deployments": "write",
|
||||||
|
"issues": "write",
|
||||||
|
"metadata": "read",
|
||||||
|
"organization_packages": "write",
|
||||||
|
"packages": "write",
|
||||||
|
"pages": "write",
|
||||||
|
"pull_requests": "write",
|
||||||
|
"repository_hooks": "write",
|
||||||
|
"repository_projects": "write",
|
||||||
|
"security_events": "write",
|
||||||
|
"statuses": "write",
|
||||||
|
"vulnerability_alerts": "read"
|
||||||
|
},
|
||||||
|
"events": [
|
||||||
|
"check_run",
|
||||||
|
"check_suite",
|
||||||
|
"create",
|
||||||
|
"delete",
|
||||||
|
"deployment",
|
||||||
|
"deployment_status",
|
||||||
|
"fork",
|
||||||
|
"gollum",
|
||||||
|
"issues",
|
||||||
|
"issue_comment",
|
||||||
|
"label",
|
||||||
|
"milestone",
|
||||||
|
"page_build",
|
||||||
|
"project",
|
||||||
|
"project_card",
|
||||||
|
"project_column",
|
||||||
|
"public",
|
||||||
|
"pull_request",
|
||||||
|
"pull_request_review",
|
||||||
|
"pull_request_review_comment",
|
||||||
|
"push",
|
||||||
|
"registry_package",
|
||||||
|
"release",
|
||||||
|
"repository",
|
||||||
|
"repository_dispatch",
|
||||||
|
"status",
|
||||||
|
"watch",
|
||||||
|
"workflow_dispatch",
|
||||||
|
"workflow_run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"created_at": "2021-02-18T06:15:32Z",
|
||||||
|
"updated_at": "2021-02-18T06:16:31Z"
|
||||||
|
},
|
||||||
|
"app": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"slug": "github-actions",
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"owner": {
|
||||||
|
"login": "github",
|
||||||
|
"id": 1234567890,
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"avatar_url": "https://avatars.githubusercontent.com/u/1234567890?v=4",
|
||||||
|
"gravatar_id": "",
|
||||||
|
"url": "https://api.github.com/users/github",
|
||||||
|
"html_url": "https://github.com/github",
|
||||||
|
"followers_url": "https://api.github.com/users/github/followers",
|
||||||
|
"following_url": "https://api.github.com/users/github/following{/other_user}",
|
||||||
|
"gists_url": "https://api.github.com/users/github/gists{/gist_id}",
|
||||||
|
"starred_url": "https://api.github.com/users/github/starred{/owner}{/repo}",
|
||||||
|
"subscriptions_url": "https://api.github.com/users/github/subscriptions",
|
||||||
|
"organizations_url": "https://api.github.com/users/github/orgs",
|
||||||
|
"repos_url": "https://api.github.com/users/github/repos",
|
||||||
|
"events_url": "https://api.github.com/users/github/events{/privacy}",
|
||||||
|
"received_events_url": "https://api.github.com/users/github/received_events",
|
||||||
|
"type": "Organization",
|
||||||
|
"site_admin": false
|
||||||
|
},
|
||||||
|
"name": "GitHub Actions",
|
||||||
|
"description": "Automate your workflow from idea to production",
|
||||||
|
"external_url": "https://help.github.com/en/actions",
|
||||||
|
"html_url": "https://github.com/apps/github-actions",
|
||||||
|
"created_at": "2018-07-30T09:30:17Z",
|
||||||
|
"updated_at": "2019-12-10T19:04:12Z",
|
||||||
|
"permissions": {
|
||||||
|
"actions": "write",
|
||||||
|
"checks": "write",
|
||||||
|
"contents": "write",
|
||||||
|
"deployments": "write",
|
||||||
|
"issues": "write",
|
||||||
|
"metadata": "read",
|
||||||
|
"organization_packages": "write",
|
||||||
|
"packages": "write",
|
||||||
|
"pages": "write",
|
||||||
|
"pull_requests": "write",
|
||||||
|
"repository_hooks": "write",
|
||||||
|
"repository_projects": "write",
|
||||||
|
"security_events": "write",
|
||||||
|
"statuses": "write",
|
||||||
|
"vulnerability_alerts": "read"
|
||||||
|
},
|
||||||
|
"events": [
|
||||||
|
"check_run",
|
||||||
|
"check_suite",
|
||||||
|
"create",
|
||||||
|
"delete",
|
||||||
|
"deployment",
|
||||||
|
"deployment_status",
|
||||||
|
"fork",
|
||||||
|
"gollum",
|
||||||
|
"issues",
|
||||||
|
"issue_comment",
|
||||||
|
"label",
|
||||||
|
"milestone",
|
||||||
|
"page_build",
|
||||||
|
"project",
|
||||||
|
"project_card",
|
||||||
|
"project_column",
|
||||||
|
"public",
|
||||||
|
"pull_request",
|
||||||
|
"pull_request_review",
|
||||||
|
"pull_request_review_comment",
|
||||||
|
"push",
|
||||||
|
"registry_package",
|
||||||
|
"release",
|
||||||
|
"repository",
|
||||||
|
"repository_dispatch",
|
||||||
|
"status",
|
||||||
|
"watch",
|
||||||
|
"workflow_dispatch",
|
||||||
|
"workflow_run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"pull_requests": [
|
||||||
|
{
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO/pulls/1234567890",
|
||||||
|
"id": 1234567890,
|
||||||
|
"number": 1234567890,
|
||||||
|
"head": {
|
||||||
|
"ref": "feature",
|
||||||
|
"sha": "1234567890123456789012345678901234567890",
|
||||||
|
"repo": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO",
|
||||||
|
"name": "MYREPO"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"base": {
|
||||||
|
"ref": "master",
|
||||||
|
"sha": "1234567890123456789012345678901234567890",
|
||||||
|
"repo": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"url": "https://api.github.com/repos/MYORG/MYREPO",
|
||||||
|
"name": "MYREPO"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"repository": {
|
||||||
|
"id": 1234567890,
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"name": "MYREPO",
|
||||||
|
"full_name": "MYORG/MYREPO",
|
||||||
|
"private": true,
|
||||||
|
"owner": {
|
||||||
|
"login": "MYUSER",
|
||||||
|
"id": 1234567890,
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"avatar_url": "https://avatars.githubusercontent.com/u/1234567890?v=4",
|
||||||
|
"gravatar_id": "",
|
||||||
|
"url": "https://api.github.com/users/MYUSER",
|
||||||
|
"html_url": "https://github.com/MYUSER",
|
||||||
|
"followers_url": "https://api.github.com/users/MYUSER/followers",
|
||||||
|
"following_url": "https://api.github.com/users/MYUSER/following{/other_user}",
|
||||||
|
"gists_url": "https://api.github.com/users/MYUSER/gists{/gist_id}",
|
||||||
|
"starred_url": "https://api.github.com/users/MYUSER/starred{/owner}{/repo}",
|
||||||
|
"subscriptions_url": "https://api.github.com/users/MYUSER/subscriptions",
|
||||||
|
"organizations_url": "https://api.github.com/users/MYUSER/orgs",
|
||||||
|
"repos_url": "https://api.github.com/users/MYUSER/repos",
|
||||||
|
"events_url": "https://api.github.com/users/MYUSER/events{/privacy}",
|
||||||
|
"received_events_url": "https://api.github.com/users/MYUSER/received_events",
|
||||||
|
"type": "User",
|
||||||
|
"site_admin": false
|
||||||
|
},
|
||||||
|
"html_url": "https://github.com/MYUSER/MYREPO",
|
||||||
|
"description": null,
|
||||||
|
"fork": false,
|
||||||
|
"url": "https://api.github.com/repos/MYUSER/MYREPO",
|
||||||
|
"forks_url": "https://api.github.com/repos/MYUSER/MYREPO/forks",
|
||||||
|
"keys_url": "https://api.github.com/repos/MYUSER/MYREPO/keys{/key_id}",
|
||||||
|
"collaborators_url": "https://api.github.com/repos/MYUSER/MYREPO/collaborators{/collaborator}",
|
||||||
|
"teams_url": "https://api.github.com/repos/MYUSER/MYREPO/teams",
|
||||||
|
"hooks_url": "https://api.github.com/repos/MYUSER/MYREPO/hooks",
|
||||||
|
"issue_events_url": "https://api.github.com/repos/MYUSER/MYREPO/issues/events{/number}",
|
||||||
|
"events_url": "https://api.github.com/repos/MYUSER/MYREPO/events",
|
||||||
|
"assignees_url": "https://api.github.com/repos/MYUSER/MYREPO/assignees{/user}",
|
||||||
|
"branches_url": "https://api.github.com/repos/MYUSER/MYREPO/branches{/branch}",
|
||||||
|
"tags_url": "https://api.github.com/repos/MYUSER/MYREPO/tags",
|
||||||
|
"blobs_url": "https://api.github.com/repos/MYUSER/MYREPO/git/blobs{/sha}",
|
||||||
|
"git_tags_url": "https://api.github.com/repos/MYUSER/MYREPO/git/tags{/sha}",
|
||||||
|
"git_refs_url": "https://api.github.com/repos/MYUSER/MYREPO/git/refs{/sha}",
|
||||||
|
"trees_url": "https://api.github.com/repos/MYUSER/MYREPO/git/trees{/sha}",
|
||||||
|
"statuses_url": "https://api.github.com/repos/MYUSER/MYREPO/statuses/{sha}",
|
||||||
|
"languages_url": "https://api.github.com/repos/MYUSER/MYREPO/languages",
|
||||||
|
"stargazers_url": "https://api.github.com/repos/MYUSER/MYREPO/stargazers",
|
||||||
|
"contributors_url": "https://api.github.com/repos/MYUSER/MYREPO/contributors",
|
||||||
|
"subscribers_url": "https://api.github.com/repos/MYUSER/MYREPO/subscribers",
|
||||||
|
"subscription_url": "https://api.github.com/repos/MYUSER/MYREPO/subscription",
|
||||||
|
"commits_url": "https://api.github.com/repos/MYUSER/MYREPO/commits{/sha}",
|
||||||
|
"git_commits_url": "https://api.github.com/repos/MYUSER/MYREPO/git/commits{/sha}",
|
||||||
|
"comments_url": "https://api.github.com/repos/MYUSER/MYREPO/comments{/number}",
|
||||||
|
"issue_comment_url": "https://api.github.com/repos/MYUSER/MYREPO/issues/comments{/number}",
|
||||||
|
"contents_url": "https://api.github.com/repos/MYUSER/MYREPO/contents/{+path}",
|
||||||
|
"compare_url": "https://api.github.com/repos/MYUSER/MYREPO/compare/{base}...{head}",
|
||||||
|
"merges_url": "https://api.github.com/repos/MYUSER/MYREPO/merges",
|
||||||
|
"archive_url": "https://api.github.com/repos/MYUSER/MYREPO/{archive_format}{/ref}",
|
||||||
|
"downloads_url": "https://api.github.com/repos/MYUSER/MYREPO/downloads",
|
||||||
|
"issues_url": "https://api.github.com/repos/MYUSER/MYREPO/issues{/number}",
|
||||||
|
"pulls_url": "https://api.github.com/repos/MYUSER/MYREPO/pulls{/number}",
|
||||||
|
"milestones_url": "https://api.github.com/repos/MYUSER/MYREPO/milestones{/number}",
|
||||||
|
"notifications_url": "https://api.github.com/repos/MYUSER/MYREPO/notifications{?since,all,participating}",
|
||||||
|
"labels_url": "https://api.github.com/repos/MYUSER/MYREPO/labels{/name}",
|
||||||
|
"releases_url": "https://api.github.com/repos/MYUSER/MYREPO/releases{/id}",
|
||||||
|
"deployments_url": "https://api.github.com/repos/MYUSER/MYREPO/deployments",
|
||||||
|
"created_at": "2021-02-18T06:16:31Z",
|
||||||
|
"updated_at": "2021-02-18T06:16:31Z",
|
||||||
|
"pushed_at": "2021-02-18T06:16:31Z",
|
||||||
|
"git_url": "git://github.com/MYUSER/MYREPO.git",
|
||||||
|
"ssh_url": "git@github.com:MYUSER/MYREPO.git",
|
||||||
|
"clone_url": "https://github.com/MYUSER/MYREPO.git",
|
||||||
|
"svn_url": "https://github.com/MYUSER/MYREPO",
|
||||||
|
"homepage": null,
|
||||||
|
"size": 4,
|
||||||
|
"stargazers_count": 0,
|
||||||
|
"watchers_count": 0,
|
||||||
|
"language": null,
|
||||||
|
"has_issues": true,
|
||||||
|
"has_projects": true,
|
||||||
|
"has_downloads": true,
|
||||||
|
"has_wiki": true,
|
||||||
|
"has_pages": false,
|
||||||
|
"forks_count": 0,
|
||||||
|
"mirror_url": null,
|
||||||
|
"archived": false,
|
||||||
|
"disabled": false,
|
||||||
|
"open_issues_count": 0,
|
||||||
|
"license": null,
|
||||||
|
"forks": 0,
|
||||||
|
"open_issues": 0,
|
||||||
|
"watchers": 0,
|
||||||
|
"default_branch": "main"
|
||||||
|
},
|
||||||
|
"sender": {
|
||||||
|
"login": "MYUSER",
|
||||||
|
"id": 1234567890,
|
||||||
|
"node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
|
||||||
|
"avatar_url": "https://avatars.githubusercontent.com/u/1234567890?v=4",
|
||||||
|
"gravatar_id": "",
|
||||||
|
"url": "https://api.github.com/users/MYUSER",
|
||||||
|
"html_url": "https://github.com/MYUSER",
|
||||||
|
"followers_url": "https://api.github.com/users/MYUSER/followers",
|
||||||
|
"following_url": "https://api.github.com/users/MYUSER/following{/other_user}",
|
||||||
|
"gists_url": "https://api.github.com/users/MYUSER/gists{/gist_id}",
|
||||||
|
"starred_url": "https://api.github.com/users/MYUSER/starred{/owner}{/repo}",
|
||||||
|
"subscriptions_url": "https://api.github.com/users/MYUSER/subscriptions",
|
||||||
|
"organizations_url": "https://api.github.com/users/MYUSER/orgs",
|
||||||
|
"repos_url": "https://api.github.com/users/MYUSER/repos",
|
||||||
|
"events_url": "https://api.github.com/users/MYUSER/events{/privacy}",
|
||||||
|
"received_events_url": "https://api.github.com/users/MYUSER/received_events",
|
||||||
|
"type": "User",
|
||||||
|
"site_admin": false
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -24,13 +24,34 @@ const (
 `
 )

-type Handler struct {
+type ListRunnersHandler struct {
 	Status int
 	Body   string
 }

+func (h *ListRunnersHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	w.WriteHeader(h.Status)
+	fmt.Fprintf(w, h.Body)
+}
+
+type Handler struct {
+	Status int
+	Body   string
+
+	Statuses map[string]string
+}
+
 func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 	w.WriteHeader(h.Status)
+
+	status := req.URL.Query().Get("status")
+	if h.Statuses != nil {
+		if body, ok := h.Statuses[status]; ok {
+			fmt.Fprintf(w, body)
+			return
+		}
+	}
+
 	fmt.Fprintf(w, h.Body)
 }

@@ -92,12 +113,21 @@ func NewServer(opts ...Option) *httptest.Server {
 			Status: http.StatusBadRequest,
 			Body:   "",
 		},
+		"/enterprises/test/actions/runners/registration-token": &Handler{
+			Status: http.StatusCreated,
+			Body:   fmt.Sprintf("{\"token\": \"%s\", \"expires_at\": \"%s\"}", RegistrationToken, time.Now().Add(time.Hour*1).Format(time.RFC3339)),
+		},
+		"/enterprises/invalid/actions/runners/registration-token": &Handler{
+			Status: http.StatusOK,
+			Body:   fmt.Sprintf("{\"token\": \"%s\", \"expires_at\": \"%s\"}", RegistrationToken, time.Now().Add(time.Hour*1).Format(time.RFC3339)),
+		},
+		"/enterprises/error/actions/runners/registration-token": &Handler{
+			Status: http.StatusBadRequest,
+			Body:   "",
+		},

 		// For ListRunners
-		"/repos/test/valid/actions/runners": &Handler{
-			Status: http.StatusOK,
-			Body:   RunnersListBody,
-		},
+		"/repos/test/valid/actions/runners": config.FixedResponses.ListRunners,
 		"/repos/test/invalid/actions/runners": &Handler{
 			Status: http.StatusNoContent,
 			Body:   "",
@@ -118,6 +148,18 @@ func NewServer(opts ...Option) *httptest.Server {
 			Status: http.StatusBadRequest,
 			Body:   "",
 		},
+		"/enterprises/test/actions/runners": &Handler{
+			Status: http.StatusOK,
+			Body:   RunnersListBody,
+		},
+		"/enterprises/invalid/actions/runners": &Handler{
+			Status: http.StatusNoContent,
+			Body:   "",
+		},
+		"/enterprises/error/actions/runners": &Handler{
+			Status: http.StatusBadRequest,
+			Body:   "",
+		},

 		// For RemoveRunner
 		"/repos/test/valid/actions/runners/1": &Handler{
@@ -144,6 +186,18 @@ func NewServer(opts ...Option) *httptest.Server {
 			Status: http.StatusBadRequest,
 			Body:   "",
 		},
+		"/enterprises/test/actions/runners/1": &Handler{
+			Status: http.StatusNoContent,
+			Body:   "",
+		},
+		"/enterprises/invalid/actions/runners/1": &Handler{
+			Status: http.StatusOK,
+			Body:   "",
+		},
+		"/enterprises/error/actions/runners/1": &Handler{
+			Status: http.StatusBadRequest,
+			Body:   "",
+		},

 		// For auto-scaling based on the number of queued(pending) workflow runs
 		"/repos/test/valid/actions/runs": config.FixedResponses.ListRepositoryWorkflowRuns,
@@ -159,3 +213,10 @@ func NewServer(opts ...Option) *httptest.Server {

 	return httptest.NewServer(mux)
 }
+
+func DefaultListRunnersHandler() *ListRunnersHandler {
+	return &ListRunnersHandler{
+		Status: http.StatusOK,
+		Body:   RunnersListBody,
+	}
+}
@@ -1,17 +1,24 @@
 package fake

+import "net/http"
+
 type FixedResponses struct {
 	ListRepositoryWorkflowRuns *Handler
 	ListWorkflowJobs           *MapHandler
+	ListRunners                http.Handler
 }

 type Option func(*ServerConfig)

-func WithListRepositoryWorkflowRunsResponse(status int, body string) Option {
+func WithListRepositoryWorkflowRunsResponse(status int, body, queued, in_progress string) Option {
 	return func(c *ServerConfig) {
 		c.FixedResponses.ListRepositoryWorkflowRuns = &Handler{
 			Status: status,
 			Body:   body,
+			Statuses: map[string]string{
+				"queued":      queued,
+				"in_progress": in_progress,
+			},
 		}
 	}
 }
@@ -25,6 +32,15 @@ func WithListWorkflowJobsResponse(status int, bodies map[int]string) Option {
 	}
 }

+func WithListRunnersResponse(status int, body string) Option {
+	return func(c *ServerConfig) {
+		c.FixedResponses.ListRunners = &ListRunnersHandler{
+			Status: status,
+			Body:   body,
+		}
+	}
+}
+
 func WithFixedResponses(responses *FixedResponses) Option {
 	return func(c *ServerConfig) {
 		c.FixedResponses = responses
@@ -6,6 +6,8 @@ import (
 	"net/http/httptest"
 	"strconv"

+	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
+
 	"github.com/google/go-github/v33/github"
 	"github.com/gorilla/mux"
 )
@@ -29,15 +31,15 @@ func (r *RunnersList) Add(runner *github.Runner) {
 func (r *RunnersList) GetServer() *httptest.Server {
 	router := mux.NewRouter()

-	router.Handle("/repos/{owner}/{repo}/actions/runners", r.handleList())
+	router.Handle("/repos/{owner}/{repo}/actions/runners", r.HandleList())
 	router.Handle("/repos/{owner}/{repo}/actions/runners/{id}", r.handleRemove())
-	router.Handle("/orgs/{org}/actions/runners", r.handleList())
+	router.Handle("/orgs/{org}/actions/runners", r.HandleList())
 	router.Handle("/orgs/{org}/actions/runners/{id}", r.handleRemove())

 	return httptest.NewServer(router)
 }

-func (r *RunnersList) handleList() http.HandlerFunc {
+func (r *RunnersList) HandleList() http.HandlerFunc {
 	return func(w http.ResponseWriter, res *http.Request) {
 		j, err := json.Marshal(github.Runners{
 			TotalCount: len(r.runners),
@@ -64,6 +66,32 @@ func (r *RunnersList) handleRemove() http.HandlerFunc {
 	}
 }

+func (r *RunnersList) Sync(runners []v1alpha1.Runner) {
+	r.runners = nil
+
+	for i, want := range runners {
+		r.Add(&github.Runner{
+			ID:     github.Int64(int64(i)),
+			Name:   github.String(want.Name),
+			OS:     github.String("linux"),
+			Status: github.String("online"),
+			Busy:   github.Bool(false),
+		})
+	}
+}
+
+func (r *RunnersList) AddOffline(runners []v1alpha1.Runner) {
+	for i, want := range runners {
+		r.Add(&github.Runner{
+			ID:     github.Int64(int64(1000 + i)),
+			Name:   github.String(want.Name),
+			OS:     github.String("linux"),
+			Status: github.String("offline"),
+			Busy:   github.Bool(false),
+		})
+	}
+}
+
 func exists(runners []*github.Runner, runner *github.Runner) bool {
 	for _, r := range runners {
 		if *r.Name == *runner.Name {
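The Sync and AddOffline helpers added above let a test mirror the cluster's Runner resources in the fake registration API. A rough usage sketch, assuming the type lives in the same fake package; the two runner slices are hypothetical []v1alpha1.Runner test fixtures.

	runnersList := &fake.RunnersList{}
	runnersList.Sync(managedRunners)        // online, not busy, IDs starting at 0
	runnersList.AddOffline(startingRunners) // offline entries, IDs offset by 1000
	apiServer := runnersList.GetServer()
	defer apiServer.Close()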
github/github.go (219 changed lines)
@@ -5,12 +5,14 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/bradleyfalzon/ghinstallation"
|
"github.com/bradleyfalzon/ghinstallation"
|
||||||
"github.com/google/go-github/v33/github"
|
"github.com/google/go-github/v33/github"
|
||||||
|
"github.com/summerwind/actions-runner-controller/github/metrics"
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -34,20 +36,24 @@ type Client struct {
|
|||||||
|
|
||||||
// NewClient creates a Github Client
|
// NewClient creates a Github Client
|
||||||
func (c *Config) NewClient() (*Client, error) {
|
func (c *Config) NewClient() (*Client, error) {
|
||||||
var (
|
var transport http.RoundTripper
|
||||||
httpClient *http.Client
|
|
||||||
client *github.Client
|
|
||||||
)
|
|
||||||
githubBaseURL := "https://github.com/"
|
|
||||||
if len(c.Token) > 0 {
|
if len(c.Token) > 0 {
|
||||||
httpClient = oauth2.NewClient(context.Background(), oauth2.StaticTokenSource(
|
transport = oauth2.NewClient(context.Background(), oauth2.StaticTokenSource(&oauth2.Token{AccessToken: c.Token})).Transport
|
||||||
&oauth2.Token{AccessToken: c.Token},
|
|
||||||
))
|
|
||||||
} else {
|
} else {
|
||||||
tr, err := ghinstallation.NewKeyFromFile(http.DefaultTransport, c.AppID, c.AppInstallationID, c.AppPrivateKey)
|
var tr *ghinstallation.Transport
|
||||||
|
|
||||||
|
if _, err := os.Stat(c.AppPrivateKey); err == nil {
|
||||||
|
tr, err = ghinstallation.NewKeyFromFile(http.DefaultTransport, c.AppID, c.AppInstallationID, c.AppPrivateKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("authentication failed: %v", err)
|
return nil, fmt.Errorf("authentication failed: using private key at %s: %v", c.AppPrivateKey, err)
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
tr, err = ghinstallation.New(http.DefaultTransport, c.AppID, c.AppInstallationID, []byte(c.AppPrivateKey))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("authentication failed: using private key of size %d (%s...): %v", len(c.AppPrivateKey), strings.Split(c.AppPrivateKey, "\n")[0], err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if len(c.EnterpriseURL) > 0 {
|
if len(c.EnterpriseURL) > 0 {
|
||||||
githubAPIURL, err := getEnterpriseApiUrl(c.EnterpriseURL)
|
githubAPIURL, err := getEnterpriseApiUrl(c.EnterpriseURL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -55,9 +61,13 @@ func (c *Config) NewClient() (*Client, error) {
|
|||||||
}
|
}
|
||||||
tr.BaseURL = githubAPIURL
|
tr.BaseURL = githubAPIURL
|
||||||
}
|
}
|
||||||
httpClient = &http.Client{Transport: tr}
|
transport = tr
|
||||||
}
|
}
|
||||||
|
transport = metrics.Transport{Transport: transport}
|
||||||
|
httpClient := &http.Client{Transport: transport}
|
||||||
|
|
||||||
|
var client *github.Client
|
||||||
|
var githubBaseURL string
|
||||||
if len(c.EnterpriseURL) > 0 {
|
if len(c.EnterpriseURL) > 0 {
|
||||||
var err error
|
var err error
|
||||||
client, err = github.NewEnterpriseClient(c.EnterpriseURL, c.EnterpriseURL, httpClient)
|
client, err = github.NewEnterpriseClient(c.EnterpriseURL, c.EnterpriseURL, httpClient)
|
||||||
@@ -67,6 +77,7 @@ func (c *Config) NewClient() (*Client, error) {
|
|||||||
githubBaseURL = fmt.Sprintf("%s://%s%s", client.BaseURL.Scheme, client.BaseURL.Host, strings.TrimSuffix(client.BaseURL.Path, "api/v3/"))
|
githubBaseURL = fmt.Sprintf("%s://%s%s", client.BaseURL.Scheme, client.BaseURL.Host, strings.TrimSuffix(client.BaseURL.Path, "api/v3/"))
|
||||||
} else {
|
} else {
|
||||||
client = github.NewClient(httpClient)
|
client = github.NewClient(httpClient)
|
||||||
|
githubBaseURL = "https://github.com/"
|
||||||
}
|
}
|
||||||
|
|
||||||
return &Client{
|
return &Client{
|
||||||
@@ -78,24 +89,27 @@ func (c *Config) NewClient() (*Client, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetRegistrationToken returns a registration token tied with the name of repository and runner.
|
// GetRegistrationToken returns a registration token tied with the name of repository and runner.
|
||||||
func (c *Client) GetRegistrationToken(ctx context.Context, org, repo, name string) (*github.RegistrationToken, error) {
|
func (c *Client) GetRegistrationToken(ctx context.Context, enterprise, org, repo, name string) (*github.RegistrationToken, error) {
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
defer c.mu.Unlock()
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
key := getRegistrationKey(org, repo)
|
key := getRegistrationKey(org, repo, enterprise)
|
||||||
rt, ok := c.regTokens[key]
|
rt, ok := c.regTokens[key]
|
||||||
|
|
||||||
if ok && rt.GetExpiresAt().After(time.Now()) {
|
// we like to give runners a chance that are just starting up and may miss the expiration date by a bit
|
||||||
|
runnerStartupTimeout := 3 * time.Minute
|
||||||
|
|
||||||
|
if ok && rt.GetExpiresAt().After(time.Now().Add(runnerStartupTimeout)) {
|
||||||
return rt, nil
|
return rt, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
owner, repo, err := getOwnerAndRepo(org, repo)
|
enterprise, owner, repo, err := getEnterpriseOrganisationAndRepo(enterprise, org, repo)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return rt, err
|
return rt, err
|
||||||
}
|
}
|
||||||
|
|
||||||
rt, res, err := c.createRegistrationToken(ctx, owner, repo)
|
rt, res, err := c.createRegistrationToken(ctx, enterprise, owner, repo)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to create registration token: %v", err)
|
return nil, fmt.Errorf("failed to create registration token: %v", err)
|
||||||
@@ -114,17 +128,17 @@ func (c *Client) GetRegistrationToken(ctx context.Context, org, repo, name strin
|
|||||||
}
|
}
|
||||||
|
|
||||||
// RemoveRunner removes a runner with specified runner ID from repository.
|
// RemoveRunner removes a runner with specified runner ID from repository.
|
||||||
func (c *Client) RemoveRunner(ctx context.Context, org, repo string, runnerID int64) error {
|
func (c *Client) RemoveRunner(ctx context.Context, enterprise, org, repo string, runnerID int64) error {
|
||||||
owner, repo, err := getOwnerAndRepo(org, repo)
|
enterprise, owner, repo, err := getEnterpriseOrganisationAndRepo(enterprise, org, repo)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
res, err := c.removeRunner(ctx, owner, repo, runnerID)
|
res, err := c.removeRunner(ctx, enterprise, owner, repo, runnerID)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to remove runner: %v", err)
|
return fmt.Errorf("failed to remove runner: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if res.StatusCode != 204 {
|
if res.StatusCode != 204 {
|
||||||
@@ -135,8 +149,8 @@ func (c *Client) RemoveRunner(ctx context.Context, org, repo string, runnerID in
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ListRunners returns a list of runners of specified owner/repository name.
|
// ListRunners returns a list of runners of specified owner/repository name.
|
||||||
func (c *Client) ListRunners(ctx context.Context, org, repo string) ([]*github.Runner, error) {
|
func (c *Client) ListRunners(ctx context.Context, enterprise, org, repo string) ([]*github.Runner, error) {
|
||||||
owner, repo, err := getOwnerAndRepo(org, repo)
|
enterprise, owner, repo, err := getEnterpriseOrganisationAndRepo(enterprise, org, repo)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -144,12 +158,12 @@ func (c *Client) ListRunners(ctx context.Context, org, repo string) ([]*github.R
|
|||||||
|
|
||||||
var runners []*github.Runner
|
var runners []*github.Runner
|
||||||
|
|
||||||
opts := github.ListOptions{PerPage: 10}
|
opts := github.ListOptions{PerPage: 100}
|
||||||
for {
|
for {
|
||||||
list, res, err := c.listRunners(ctx, owner, repo, &opts)
|
list, res, err := c.listRunners(ctx, enterprise, owner, repo, &opts)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return runners, fmt.Errorf("failed to list runners: %v", err)
|
return runners, fmt.Errorf("failed to list runners: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
runners = append(runners, list.Runners...)
|
runners = append(runners, list.Runners...)
|
||||||
@@ -174,49 +188,102 @@ func (c *Client) cleanup() {
     }
 }
 
-// wrappers for github functions (switch between organization/repository mode)
+// wrappers for github functions (switch between enterprise/organization/repository mode)
 // so the calling functions don't need to switch and their code is a bit cleaner
 
-func (c *Client) createRegistrationToken(ctx context.Context, owner, repo string) (*github.RegistrationToken, *github.Response, error) {
-    if len(repo) > 0 {
-        return c.Client.Actions.CreateRegistrationToken(ctx, owner, repo)
-    }
-
-    return c.Client.Actions.CreateOrganizationRegistrationToken(ctx, owner)
-}
-
-func (c *Client) removeRunner(ctx context.Context, owner, repo string, runnerID int64) (*github.Response, error) {
-    if len(repo) > 0 {
-        return c.Client.Actions.RemoveRunner(ctx, owner, repo, runnerID)
-    }
-
-    return c.Client.Actions.RemoveOrganizationRunner(ctx, owner, runnerID)
-}
-
-func (c *Client) listRunners(ctx context.Context, owner, repo string, opts *github.ListOptions) (*github.Runners, *github.Response, error) {
-    if len(repo) > 0 {
-        return c.Client.Actions.ListRunners(ctx, owner, repo, opts)
-    }
-
-    return c.Client.Actions.ListOrganizationRunners(ctx, owner, opts)
-}
-
-// Validates owner and repo arguments. Both are optional, but at least one should be specified
-func getOwnerAndRepo(org, repo string) (string, string, error) {
-    if len(repo) > 0 {
-        return splitOwnerAndRepo(repo)
-    }
-    if len(org) > 0 {
-        return org, "", nil
-    }
-    return "", "", fmt.Errorf("organization and repository are both empty")
-}
-
-func getRegistrationKey(org, repo string) string {
-    if len(org) > 0 {
-        return org
-    }
-    return repo
+func (c *Client) createRegistrationToken(ctx context.Context, enterprise, org, repo string) (*github.RegistrationToken, *github.Response, error) {
+    if len(repo) > 0 {
+        return c.Client.Actions.CreateRegistrationToken(ctx, org, repo)
+    }
+    if len(org) > 0 {
+        return c.Client.Actions.CreateOrganizationRegistrationToken(ctx, org)
+    }
+    return c.Client.Enterprise.CreateRegistrationToken(ctx, enterprise)
+}
+
+func (c *Client) removeRunner(ctx context.Context, enterprise, org, repo string, runnerID int64) (*github.Response, error) {
+    if len(repo) > 0 {
+        return c.Client.Actions.RemoveRunner(ctx, org, repo, runnerID)
+    }
+    if len(org) > 0 {
+        return c.Client.Actions.RemoveOrganizationRunner(ctx, org, runnerID)
+    }
+    return c.Client.Enterprise.RemoveRunner(ctx, enterprise, runnerID)
+}
+
+func (c *Client) listRunners(ctx context.Context, enterprise, org, repo string, opts *github.ListOptions) (*github.Runners, *github.Response, error) {
+    if len(repo) > 0 {
+        return c.Client.Actions.ListRunners(ctx, org, repo, opts)
+    }
+    if len(org) > 0 {
+        return c.Client.Actions.ListOrganizationRunners(ctx, org, opts)
+    }
+    return c.Client.Enterprise.ListRunners(ctx, enterprise, opts)
+}
+
+func (c *Client) ListRepositoryWorkflowRuns(ctx context.Context, user string, repoName string) ([]*github.WorkflowRun, error) {
+    queued, err := c.listRepositoryWorkflowRuns(ctx, user, repoName, "queued")
+    if err != nil {
+        return nil, fmt.Errorf("listing queued workflow runs: %w", err)
+    }
+
+    inProgress, err := c.listRepositoryWorkflowRuns(ctx, user, repoName, "in_progress")
+    if err != nil {
+        return nil, fmt.Errorf("listing in_progress workflow runs: %w", err)
+    }
+
+    var workflowRuns []*github.WorkflowRun
+
+    workflowRuns = append(workflowRuns, queued...)
+    workflowRuns = append(workflowRuns, inProgress...)
+
+    return workflowRuns, nil
+}
+
+func (c *Client) listRepositoryWorkflowRuns(ctx context.Context, user string, repoName, status string) ([]*github.WorkflowRun, error) {
+    var workflowRuns []*github.WorkflowRun
+
+    opts := github.ListWorkflowRunsOptions{
+        ListOptions: github.ListOptions{
+            PerPage: 100,
+        },
+        Status: status,
+    }
+
+    for {
+        list, res, err := c.Client.Actions.ListRepositoryWorkflowRuns(ctx, user, repoName, &opts)
+
+        if err != nil {
+            return workflowRuns, fmt.Errorf("failed to list workflow runs: %v", err)
+        }
+
+        workflowRuns = append(workflowRuns, list.WorkflowRuns...)
+        if res.NextPage == 0 {
+            break
+        }
+        opts.Page = res.NextPage
+    }
+
+    return workflowRuns, nil
+}
+
+// Validates enterprise, organisation and repo arguments. Both are optional, but at least one should be specified
+func getEnterpriseOrganisationAndRepo(enterprise, org, repo string) (string, string, string, error) {
+    if len(repo) > 0 {
+        owner, repository, err := splitOwnerAndRepo(repo)
+        return "", owner, repository, err
+    }
+    if len(org) > 0 {
+        return "", org, "", nil
+    }
+    if len(enterprise) > 0 {
+        return enterprise, "", "", nil
+    }
+    return "", "", "", fmt.Errorf("enterprise, organization and repository are all empty")
+}
+
+func getRegistrationKey(org, repo, enterprise string) string {
+    return fmt.Sprintf("org=%s,repo=%s,enterprise=%s", org, repo, enterprise)
 }
 
 func splitOwnerAndRepo(repo string) (string, string, error) {
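These wrappers pick the repository, organization, or enterprise endpoint based on which of the three arguments is non-empty, so callers pass exactly one scope (the repo argument in "owner/name" form). A usage sketch against the exported client methods; the helper and the "example-*" values are hypothetical and not part of this diff, and the surrounding package's context/fmt imports are assumed:

// Illustrative only: exactly one of enterprise, org, or repo is set per call.
func countRunnersPerScope(ctx context.Context, client *Client) error {
    // Repository scope: repo is "owner/name", split by splitOwnerAndRepo.
    repoRunners, err := client.ListRunners(ctx, "", "", "example-org/example-repo")
    if err != nil {
        return err
    }
    // Organization scope.
    orgRunners, err := client.ListRunners(ctx, "", "example-org", "")
    if err != nil {
        return err
    }
    // Enterprise scope (the mode added by this change).
    entRunners, err := client.ListRunners(ctx, "example-enterprise", "", "")
    if err != nil {
        return err
    }
    fmt.Printf("repo=%d org=%d enterprise=%d\n", len(repoRunners), len(orgRunners), len(entRunners))
    return nil
}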
@@ -244,3 +311,37 @@ func getEnterpriseApiUrl(baseURL string) (string, error) {
     // Trim trailing slash, otherwise there's double slash added to token endpoint
     return fmt.Sprintf("%s://%s%s", baseEndpoint.Scheme, baseEndpoint.Host, strings.TrimSuffix(baseEndpoint.Path, "/")), nil
 }
+
+type RunnerNotFound struct {
+    runnerName string
+}
+
+func (e *RunnerNotFound) Error() string {
+    return fmt.Sprintf("runner %q not found", e.runnerName)
+}
+
+type RunnerOffline struct {
+    runnerName string
+}
+
+func (e *RunnerOffline) Error() string {
+    return fmt.Sprintf("runner %q offline", e.runnerName)
+}
+
+func (r *Client) IsRunnerBusy(ctx context.Context, enterprise, org, repo, name string) (bool, error) {
+    runners, err := r.ListRunners(ctx, enterprise, org, repo)
+    if err != nil {
+        return false, err
+    }
+
+    for _, runner := range runners {
+        if runner.GetName() == name {
+            if runner.GetStatus() == "offline" {
+                return false, &RunnerOffline{runnerName: name}
+            }
+            return runner.GetBusy(), nil
+        }
+    }
+
+    return false, &RunnerNotFound{runnerName: name}
+}
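IsRunnerBusy reports "not registered" and "registered but offline" as distinct error types, so callers can treat each differently from a genuine API failure. A sketch of how a caller might branch on them with the standard errors package; the helper name and return shape are hypothetical, and the package's context and errors imports are assumed:

// Illustrative only, not part of this diff.
func runnerState(ctx context.Context, client *Client, enterprise, org, repo, name string) (busy, gone bool, err error) {
    busy, err = client.IsRunnerBusy(ctx, enterprise, org, repo, name)
    if err == nil {
        return busy, false, nil
    }
    var notFound *RunnerNotFound
    var offline *RunnerOffline
    switch {
    case errors.As(err, &notFound):
        // Never registered, or already removed: treat as gone.
        return false, true, nil
    case errors.As(err, &offline):
        // Registered but disconnected: not busy, not gone; caller may retry.
        return false, false, nil
    default:
        return false, false, err // genuine API failure
    }
}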
@@ -32,29 +32,36 @@ func newTestClient() *Client {
 }
 
 func TestMain(m *testing.M) {
-    server = fake.NewServer()
+    res := &fake.FixedResponses{
+        ListRunners: fake.DefaultListRunnersHandler(),
+    }
+    server = fake.NewServer(fake.WithFixedResponses(res))
     defer server.Close()
     m.Run()
 }
 
 func TestGetRegistrationToken(t *testing.T) {
     tests := []struct {
+        enterprise string
         org        string
         repo       string
         token      string
         err        bool
     }{
-        {org: "", repo: "test/valid", token: fake.RegistrationToken, err: false},
-        {org: "", repo: "test/invalid", token: "", err: true},
-        {org: "", repo: "test/error", token: "", err: true},
-        {org: "test", repo: "", token: fake.RegistrationToken, err: false},
-        {org: "invalid", repo: "", token: "", err: true},
-        {org: "error", repo: "", token: "", err: true},
+        {enterprise: "", org: "", repo: "test/valid", token: fake.RegistrationToken, err: false},
+        {enterprise: "", org: "", repo: "test/invalid", token: "", err: true},
+        {enterprise: "", org: "", repo: "test/error", token: "", err: true},
+        {enterprise: "", org: "test", repo: "", token: fake.RegistrationToken, err: false},
+        {enterprise: "", org: "invalid", repo: "", token: "", err: true},
+        {enterprise: "", org: "error", repo: "", token: "", err: true},
+        {enterprise: "test", org: "", repo: "", token: fake.RegistrationToken, err: false},
+        {enterprise: "invalid", org: "", repo: "", token: "", err: true},
+        {enterprise: "error", org: "", repo: "", token: "", err: true},
     }
 
     client := newTestClient()
     for i, tt := range tests {
-        rt, err := client.GetRegistrationToken(context.Background(), tt.org, tt.repo, "test")
+        rt, err := client.GetRegistrationToken(context.Background(), tt.enterprise, tt.org, tt.repo, "test")
         if !tt.err && err != nil {
             t.Errorf("[%d] unexpected error: %v", i, err)
         }
@@ -66,22 +73,26 @@ func TestGetRegistrationToken(t *testing.T) {
 
 func TestListRunners(t *testing.T) {
     tests := []struct {
+        enterprise string
         org        string
        repo       string
         length     int
         err        bool
     }{
-        {org: "", repo: "test/valid", length: 2, err: false},
-        {org: "", repo: "test/invalid", length: 0, err: true},
-        {org: "", repo: "test/error", length: 0, err: true},
-        {org: "test", repo: "", length: 2, err: false},
-        {org: "invalid", repo: "", length: 0, err: true},
-        {org: "error", repo: "", length: 0, err: true},
+        {enterprise: "", org: "", repo: "test/valid", length: 2, err: false},
+        {enterprise: "", org: "", repo: "test/invalid", length: 0, err: true},
+        {enterprise: "", org: "", repo: "test/error", length: 0, err: true},
+        {enterprise: "", org: "test", repo: "", length: 2, err: false},
+        {enterprise: "", org: "invalid", repo: "", length: 0, err: true},
+        {enterprise: "", org: "error", repo: "", length: 0, err: true},
+        {enterprise: "test", org: "", repo: "", length: 2, err: false},
+        {enterprise: "invalid", org: "", repo: "", length: 0, err: true},
+        {enterprise: "error", org: "", repo: "", length: 0, err: true},
     }
 
     client := newTestClient()
     for i, tt := range tests {
-        runners, err := client.ListRunners(context.Background(), tt.org, tt.repo)
+        runners, err := client.ListRunners(context.Background(), tt.enterprise, tt.org, tt.repo)
         if !tt.err && err != nil {
             t.Errorf("[%d] unexpected error: %v", i, err)
         }
@@ -93,21 +104,25 @@ func TestListRunners(t *testing.T) {
 
 func TestRemoveRunner(t *testing.T) {
     tests := []struct {
+        enterprise string
         org        string
         repo       string
         err        bool
     }{
-        {org: "", repo: "test/valid", err: false},
-        {org: "", repo: "test/invalid", err: true},
-        {org: "", repo: "test/error", err: true},
-        {org: "test", repo: "", err: false},
-        {org: "invalid", repo: "", err: true},
-        {org: "error", repo: "", err: true},
+        {enterprise: "", org: "", repo: "test/valid", err: false},
+        {enterprise: "", org: "", repo: "test/invalid", err: true},
+        {enterprise: "", org: "", repo: "test/error", err: true},
+        {enterprise: "", org: "test", repo: "", err: false},
+        {enterprise: "", org: "invalid", repo: "", err: true},
+        {enterprise: "", org: "error", repo: "", err: true},
+        {enterprise: "test", org: "", repo: "", err: false},
+        {enterprise: "invalid", org: "", repo: "", err: true},
+        {enterprise: "error", org: "", repo: "", err: true},
     }
 
     client := newTestClient()
     for i, tt := range tests {
-        err := client.RemoveRunner(context.Background(), tt.org, tt.repo, int64(1))
+        err := client.RemoveRunner(context.Background(), tt.enterprise, tt.org, tt.repo, int64(1))
         if !tt.err && err != nil {
             t.Errorf("[%d] unexpected error: %v", i, err)
         }
github/metrics/transport.go (new file, 63 lines)
@@ -0,0 +1,63 @@
+// Package metrics provides monitoring of the GitHub related metrics.
+//
+// This depends on the metrics exporter of kubebuilder.
+// See https://book.kubebuilder.io/reference/metrics.html for details.
+package metrics
+
+import (
+    "net/http"
+    "strconv"
+
+    "github.com/prometheus/client_golang/prometheus"
+    "sigs.k8s.io/controller-runtime/pkg/metrics"
+)
+
+func init() {
+    metrics.Registry.MustRegister(metricRateLimit, metricRateLimitRemaining)
+}
+
+var (
+    // https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting
+    metricRateLimit = prometheus.NewGauge(
+        prometheus.GaugeOpts{
+            Name: "github_rate_limit",
+            Help: "The maximum number of requests you're permitted to make per hour",
+        },
+    )
+    metricRateLimitRemaining = prometheus.NewGauge(
+        prometheus.GaugeOpts{
+            Name: "github_rate_limit_remaining",
+            Help: "The number of requests remaining in the current rate limit window",
+        },
+    )
+)
+
+const (
+    // https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting
+    headerRateLimit          = "X-RateLimit-Limit"
+    headerRateLimitRemaining = "X-RateLimit-Remaining"
+)
+
+// Transport wraps a transport with metrics monitoring
+type Transport struct {
+    Transport http.RoundTripper
+}
+
+func (t Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+    resp, err := t.Transport.RoundTrip(req)
+    if resp != nil {
+        parseResponse(resp)
+    }
+    return resp, err
+}
+
+func parseResponse(resp *http.Response) {
+    rateLimit, err := strconv.Atoi(resp.Header.Get(headerRateLimit))
+    if err == nil {
+        metricRateLimit.Set(float64(rateLimit))
+    }
+    rateLimitRemaining, err := strconv.Atoi(resp.Header.Get(headerRateLimitRemaining))
+    if err == nil {
+        metricRateLimitRemaining.Set(float64(rateLimitRemaining))
+    }
+}
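Transport is a plain http.RoundTripper wrapper, so it can wrap whatever transport the GitHub client is built on; every API response then updates the two gauges from the X-RateLimit-* headers. An illustrative wiring sketch, assuming this repository's github/metrics package is imported as "metrics" and go-github as "github"; the actual hook-up elsewhere in this PR may differ:

// Illustrative only, not part of this diff.
func newInstrumentedGitHubClient() *github.Client {
    // If the client also needs an auth RoundTripper (oauth2 or GitHub App
    // transport), that would sit inside this wrapper so authenticated
    // responses are measured as well.
    rt := metrics.Transport{Transport: http.DefaultTransport}
    return github.NewClient(&http.Client{Transport: rt})
}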
Some files were not shown because too many files have changed in this diff.