Compare commits


63 Commits

Author SHA1 Message Date
mumoshu
e2d3489171 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2023-11-27 07:04:47 +00:00
mumoshu
90db051e3e Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2023-11-27 05:06:40 +00:00
mumoshu
41e135be59 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2023-08-30 06:02:42 +00:00
mumoshu
11938d728d Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2023-08-28 05:25:26 +00:00
Link-
bfd63fb6f9 Update index.yaml
Signed-off-by: Link- <Link-@users.noreply.github.com>
2023-05-12 13:19:52 +00:00
mumoshu
c70e760b19 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2023-04-17 13:01:55 +00:00
Link-
3c7e32eb9f Update index.yaml
Signed-off-by: Link- <Link-@users.noreply.github.com>
2023-04-06 12:05:57 +00:00
Yusuke Kuoka
ccc7b81b5c chart-repo: Update index.yaml to include 0.22.0 and 0.23.0 (#2452) 2023-03-30 05:22:21 -04:00
Jan van den Berg
701f8427a0 docs: typo + codefence fix (#2053)
* Little typo in PRIVATE_KEY_FILE_PATH.
* Code fence `shell script` does not work and does not render well, `shell` (just as above) does work.
2022-11-28 11:28:47 +00:00
toast-gear
20322eb2c9 Update index.yaml
Signed-off-by: toast-gear <toast-gear@users.noreply.github.com>
2022-10-25 19:13:34 +00:00
Yusuke Kuoka
72edcbba10 Show me as the verified publisher on ArtifactHub (#1815)
Ref #1502

See https://artifacthub.io/docs/topics/repositories/helm-charts/#helm-charts-repositories for more information on how ArtifactHub works for Helm charts and how we can set up this metadata file.

Signed-off-by: Yusuke Kuoka <ykuoka@gmail.com>
2022-09-20 18:49:15 +09:00
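For context on the commit above: ArtifactHub's verified-publisher status relies on a metadata file served from the chart repository root. A minimal sketch of such an `artifacthub-repo.yml`, following the ArtifactHub docs linked in the commit message; the ID and owner values are placeholders, not this project's actual metadata:

```yaml
# artifacthub-repo.yml, served next to the Helm repository's index.yaml.
# The repositoryID is assigned on artifacthub.io; the value below is a placeholder.
repositoryID: 00000000-0000-0000-0000-000000000000
owners:
  - name: example-maintainer       # placeholder
    email: maintainer@example.com  # must match a verified ArtifactHub account
```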
mumoshu
5396f9322b Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2022-09-13 00:10:15 +00:00
mumoshu
65b0cdc588 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2022-07-15 01:24:15 +00:00
mumoshu
b7dbf997ec Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2022-07-09 23:26:54 +00:00
mumoshu
c04f1daeab Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2022-07-05 02:20:10 +00:00
mumoshu
84b7abe2ce Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2022-06-15 02:35:41 +00:00
mumoshu
85422d15a8 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2022-06-03 13:02:23 +00:00
toast-gear
d3e9c43c34 Update index.yaml
Signed-off-by: toast-gear <toast-gear@users.noreply.github.com>
2022-04-29 12:55:04 +00:00
mumoshu
6a9b0d74fd Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2022-04-08 01:59:57 +00:00
toast-gear
3e1fbfa830 Update index.yaml
Signed-off-by: toast-gear <toast-gear@users.noreply.github.com>
2022-04-03 09:16:27 +00:00
toast-gear
c842be4501 Update index.yaml
Signed-off-by: toast-gear <toast-gear@users.noreply.github.com>
2022-03-29 06:47:43 +00:00
toast-gear
132867482f Update index.yaml
Signed-off-by: toast-gear <toast-gear@users.noreply.github.com>
2022-03-16 07:58:36 +00:00
toast-gear
6a2a90164f Update index.yaml
Signed-off-by: toast-gear <toast-gear@users.noreply.github.com>
2022-02-21 09:25:02 +00:00
mumoshu
f7952743e5 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2022-02-18 01:58:55 +00:00
mumoshu
1492a0d0f9 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2022-02-09 00:30:45 +00:00
mumoshu
26d9758452 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2022-02-08 03:57:19 +00:00
Callum Tait
6ac125f060 Revert "dics: typo (#1063)" (#1080)
This reverts commit 48af148297.
2022-01-28 22:28:44 +00:00
Ashith Wilson
48af148297 dics: typo (#1063) 2022-01-28 22:27:20 +00:00
toast-gear
818c1bd3dd Update index.yaml
Signed-off-by: toast-gear <toast-gear@users.noreply.github.com>
2021-12-08 21:59:25 +00:00
toast-gear
a4c569f552 Update index.yaml
Signed-off-by: toast-gear <toast-gear@users.noreply.github.com>
2021-11-15 19:59:31 +00:00
toast-gear
55d5550ad4 Update index.yaml
Signed-off-by: toast-gear <toast-gear@users.noreply.github.com>
2021-10-18 21:06:27 +00:00
toast-gear
ddc29b1d38 Update index.yaml
Signed-off-by: toast-gear <toast-gear@users.noreply.github.com>
2021-10-02 09:05:47 +00:00
mumoshu
b684553da2 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-09-24 00:41:09 +00:00
mumoshu
ec8a74f219 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-09-15 00:39:32 +00:00
mumoshu
73e6a91de3 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-08-31 00:47:20 +00:00
mumoshu
e1c62ee5e5 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-07-03 06:17:45 +00:00
mumoshu
a192a76ca9 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-06-30 11:43:05 +00:00
mumoshu
a7d378ca09 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-06-30 00:54:28 +00:00
mumoshu
285cfd69cd Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-06-29 06:51:20 +00:00
mumoshu
c1fb952a94 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-06-27 07:51:38 +00:00
mumoshu
b1916a0e1a Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-06-11 00:21:50 +00:00
mumoshu
44972a284c Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-06-09 23:31:21 +00:00
toast-gear
6dd93508e7 Update index.yaml
Signed-off-by: toast-gear <toast-gear@users.noreply.github.com>
2021-06-08 18:25:40 +00:00
mumoshu
930efd244d Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-04-18 04:59:38 +00:00
mumoshu
60f577ea04 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-04-06 01:10:56 +00:00
mumoshu
31a16d3c2e Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-03-19 02:16:24 +00:00
mumoshu
c53a03372d Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-03-18 23:57:04 +00:00
mumoshu
e9caad7dec Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-03-18 01:37:25 +00:00
mumoshu
7a21693912 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-03-17 22:37:12 +00:00
mumoshu
942fc9fe00 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-03-16 01:53:11 +00:00
mumoshu
a2096046d5 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-03-14 01:22:24 +00:00
mumoshu
a7cb21605c Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-03-09 06:04:20 +00:00
mumoshu
c495ce47ed Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-03-05 11:28:36 +00:00
mumoshu
f1a1941455 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-03-03 00:22:03 +00:00
mumoshu
a19eab8382 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-02-26 00:27:48 +00:00
mumoshu
4ee7e5541f Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-02-18 11:21:46 +00:00
mumoshu
013d5bd2b2 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-02-17 23:44:33 +00:00
mumoshu
c1d36ebaef Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-02-16 08:17:08 +00:00
mumoshu
71eb2ae333 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-02-16 00:46:35 +00:00
mumoshu
dd1ad63ca9 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-02-07 07:47:05 +00:00
mumoshu
de7e37509c Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-01-29 00:30:24 +00:00
mumoshu
51918fecbe Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-01-25 00:15:39 +00:00
mumoshu
4fb7d154d6 Update index.yaml
Signed-off-by: mumoshu <mumoshu@users.noreply.github.com>
2021-01-24 06:37:49 +00:00
163 changed files with 11949 additions and 41616 deletions


@@ -1,13 +0,0 @@
Makefile
acceptance
runner
hack
test-assets
config
charts
.github
.envrc
.env
*.md
*.txt
*.sh


@@ -1,36 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**Checks**
- [ ] My actions-runner-controller version (v0.x.y) does support the feature
- [ ] I'm using an unreleased version of the controller I built from HEAD of the default branch
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Environment (please complete the following information):**
- Controller Version [e.g. 0.18.2]
- Deployment Method [e.g. Helm and Kustomize ]
- Helm Chart Version [e.g. 0.11.0, if applicable]
**Additional context**
Add any other context about the problem here.


@@ -1,19 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.


@@ -1,34 +0,0 @@
# Release Note Template
This is the template for actions-runner-controller's release notes.
Whenever a new release is made, I start by manually copy-pasting this template onto the GitHub UI for creating the release.
I then walk through all the changes, take some time to think about the best one-sentence explanations to tell users about the changes, write it all,
and click the publish button.
If you think you can improve future release notes in any way, please do submit a pull request to change the template below.
Note that even though it looks like a Go template, I don't use any templating to generate the changelog.
It's just that I'm used to reading and interpreting Go templates myself, rather than having a computer program do it :)
**Title**:
```
v{{ .Version }}: {{ .TitlesOfImportantChanges }}
```
**Body**:
```
**CAUTION:** If you're using the Helm chart, beware to review changes to CRDs and do manually upgrade CRDs! Helm installs CRDs only on installing a chart. It doesn't automatically upgrade CRDs. Otherwise you end up with troubles like #427, #467, and #468. Please refer to the [UPGRADING](charts/actions-runner-controller/docs/UPGRADING.md) docs for the latest process.
This release includes the following changes from contributors. Thank you!
- @{{ .GitHubUser }} fixed {{ .Feature }} to not break when ... (#{{ .PullRequestNumber }})
- @{{ .GitHubUser }} enhanced {{ .Feature }} to ... (#{{ .PullRequestNumber }})
- @{{ .GitHubUser }} added {{ .Feature }} for ... (#{{ .PullRequestNumber }})
- @{{ .GitHubUser }} fixed {{ .Topic }} in the documentation so that ... (#{{ .PullRequestNumber }})
- @{{ .GitHubUser }} added {{ .Topic }} to the documentation (#{{ .PullRequestNumber }})
- @{{ .GitHubUser }} improved the documentation about {{ .Topic }} to also cover ... (#{{ .PullRequestNumber }})
```
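The CAUTION in the template body above is actionable advice: Helm installs CRDs only when a chart is first installed, so every chart upgrade needs a manual CRD upgrade alongside it. A minimal sketch of that step, assuming a checkout of this repository (this is the same `kubectl apply` the acceptance deploy script runs):

```shell
# Apply the chart's bundled CRDs before/after `helm upgrade`,
# since Helm itself won't touch CRDs on upgrade.
kubectl apply -f charts/actions-runner-controller/crds
```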

.github/lock.yml (vendored, 25 changed lines)

@@ -1,25 +0,0 @@
# Configuration for Lock Threads
# Repo: https://github.com/dessant/lock-threads-app
# App: https://github.com/apps/lock
# Number of days of inactivity before a closed issue or pull request is locked
daysUntilLock: 7
# Skip issues and pull requests created before a given timestamp. Timestamp must
# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
skipCreatedBefore: false
# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
exemptLabels: []
# Label to add before locking, such as `outdated`. Set to `false` to disable
lockLabel: false
# Comment to post before locking. Set to `false` to disable
lockComment: >
This thread has been automatically locked since there has not been
any recent activity after it was closed. Please open a new issue for
related bugs.
# Assign `resolved` as the reason for locking. Set to `false` to disable
setLockReason: true


@@ -1,20 +0,0 @@
{
"extends": ["config:base"],
"packageRules": [
{
// automatically merge an update of runner
"matchPackageNames": ["actions/runner"],
"extractVersion": "^v(?<version>.*)$",
"automerge": true
}
],
"regexManagers": [
{
// use https://github.com/actions/runner/releases
"fileMatch": [".github/workflows/build-and-release-runners.yml"],
"matchStrings": ["RUNNER_VERSION: +(?<currentValue>.*?)\\n"],
"depNameTemplate": "actions/runner",
"datasourceTemplate": "github-releases"
}
]
}

.github/stale.yml (vendored, 66 changed lines)

@@ -1,66 +0,0 @@
# Configuration for probot-stale - https://github.com/probot/stale
# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 30
# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 14
# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
onlyLabels: []
# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
- pinned
- security
- enhancement
- refactor
- documentation
- chore
- needs-investigation
- bug
# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false
# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: false
# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: false
# Label to use when marking as stale
staleLabel: stale
# Comment to post when marking as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions.
# Comment to post when removing the stale label.
# unmarkComment: >
# Your comment here.
# Comment to post when closing a stale Issue or Pull Request.
# closeComment: >
# Your comment here.
# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 30
# Limit to only `issues` or `pulls`
# only: issues
# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
# pulls:
# daysUntilStale: 30
# markComment: >
# This pull request has been automatically marked as stale because it has not had
# recent activity. It will be closed if no further activity occurs. Thank you
# for your contributions.
# issues:
# exemptLabels:
# - confirmed


@@ -1,121 +0,0 @@
name: Build and Release Runners
on:
pull_request:
branches:
- '**'
paths:
- 'runner/**'
- .github/workflows/build-and-release-runners.yml
push:
branches:
- master
paths:
- runner/patched/*
- runner/Dockerfile
- runner/Dockerfile.ubuntu.1804
- runner/Dockerfile.dindrunner
- runner/entrypoint.sh
- .github/workflows/build-and-release-runners.yml
env:
RUNNER_VERSION: 2.281.0
DOCKER_VERSION: 20.10.8
DOCKERHUB_USERNAME: summerwind
jobs:
build:
runs-on: ubuntu-latest
name: Build ${{ matrix.name }}-ubuntu-${{ matrix.os-version }}
strategy:
matrix:
include:
- name: actions-runner
os-version: 20.04
dockerfile: Dockerfile
- name: actions-runner
os-version: 18.04
dockerfile: Dockerfile.ubuntu.1804
- name: actions-runner-dind
os-version: 20.04
dockerfile: Dockerfile.dindrunner
steps:
- name: Set outputs
id: vars
run: echo ::set-output name=sha_short::${GITHUB_SHA::7}
- name: Checkout
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v1
if: ${{ github.event_name == 'push' || github.event_name == 'release' }}
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
- name: Build and Push Versioned Tags
uses: docker/build-push-action@v2
with:
context: ./runner
file: ./runner/${{ matrix.dockerfile }}
platforms: linux/amd64,linux/arm64
push: ${{ github.event_name != 'pull_request' }}
build-args: |
RUNNER_VERSION=${{ env.RUNNER_VERSION }}
DOCKER_VERSION=${{ env.DOCKER_VERSION }}
tags: |
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-ubuntu-${{ matrix.os-version }}
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-ubuntu-${{ matrix.os-version }}-${{ steps.vars.outputs.sha_short }}
latest-tags:
if: ${{ github.event_name == 'push' || github.event_name == 'release' }}
runs-on: ubuntu-latest
name: Build ${{ matrix.name }}-latest
strategy:
matrix:
include:
- name: actions-runner
dockerfile: Dockerfile
- name: actions-runner-dind
dockerfile: Dockerfile.dindrunner
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
- name: Build and Push Latest Tag
uses: docker/build-push-action@v2
with:
context: ./runner
file: ./runner/${{ matrix.dockerfile }}
platforms: linux/amd64,linux/arm64
push: true
build-args: |
RUNNER_VERSION=${{ env.RUNNER_VERSION }}
DOCKER_VERSION=${{ env.DOCKER_VERSION }}
tags: |
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:latest

.github/workflows/build-runner.yml (vendored, new file, 64 changed lines)

@@ -0,0 +1,64 @@
on:
pull_request:
branches:
- '**'
paths:
- 'runner/**'
- .github/workflows/build-runner.yml
push:
branches:
- master
paths:
- runner/patched/*
- runner/Dockerfile
- runner/dindrunner.Dockerfile
- runner/entrypoint.sh
- .github/workflows/build-runner.yml
name: Runner
jobs:
build:
runs-on: ubuntu-latest
name: Build ${{ matrix.name }}
strategy:
matrix:
include:
- name: actions-runner
dockerfile: Dockerfile
- name: actions-runner-dind
dockerfile: dindrunner.Dockerfile
env:
RUNNER_VERSION: 2.275.1
DOCKER_VERSION: 19.03.12
DOCKERHUB_USERNAME: ${{ github.repository_owner }}
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v1
if: ${{ github.event_name == 'push' }}
with:
username: ${{ github.repository_owner }}
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
- name: Build [and Push]
uses: docker/build-push-action@v2
with:
context: ./runner
file: ./runner/${{ matrix.dockerfile }}
platforms: linux/amd64,linux/arm64
push: ${{ github.event_name != 'pull_request' }}
build-args: |
RUNNER_VERSION=${{ env.RUNNER_VERSION }}
DOCKER_VERSION=${{ env.DOCKER_VERSION }}
tags: |
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:latest


@@ -1,77 +0,0 @@
name: Lint and Test Charts
on:
push:
paths:
- 'charts/**'
- '!charts/actions-runner-controller/docs/**'
- '!charts/actions-runner-controller/*.md'
- '.github/**'
- '!.github/*.md'
workflow_dispatch:
env:
KUBE_SCORE_VERSION: 1.10.0
HELM_VERSION: v3.4.1
jobs:
lint-test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v1
with:
version: ${{ env.HELM_VERSION }}
- name: Set up kube-score
run: |
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
chmod 755 kube-score
- name: Kube-score generated manifests
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
--ignore-test pod-networkpolicy
--ignore-test deployment-has-poddisruptionbudget
--ignore-test deployment-has-host-podantiaffinity
--ignore-test container-security-context
--ignore-test pod-probes
--ignore-test container-image-tag
--enable-optional-test container-security-context-privileged
--enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v2
with:
python-version: 3.7
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.1.0
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
if [[ -n "$changed" ]]; then
echo "::set-output name=changed::true"
fi
- name: Run chart-testing (lint)
run: ct lint --config charts/.ci/ct-config.yaml
- name: Create kind cluster
uses: helm/kind-action@v1.2.0
if: steps.list-changed.outputs.changed == 'true'
# We need cert-manager already installed in the cluster because we assume the CRDs exist
- name: Install cert-manager
run: |
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
if: steps.list-changed.outputs.changed == 'true'
- name: Run chart-testing (install)
run: ct install --config charts/.ci/ct-config.yaml


@@ -1,103 +0,0 @@
name: Publish helm chart
on:
push:
branches:
- master
- main # assume that the branch name may change in future
paths:
- 'charts/**'
- '!charts/actions-runner-controller/docs/**'
- '.github/**'
- '!**.md'
workflow_dispatch:
env:
KUBE_SCORE_VERSION: 1.10.0
HELM_VERSION: v3.4.1
jobs:
lint-chart:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v1
with:
version: ${{ env.HELM_VERSION }}
- name: Set up kube-score
run: |
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
chmod 755 kube-score
- name: Kube-score generated manifests
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
--ignore-test pod-networkpolicy
--ignore-test deployment-has-poddisruptionbudget
--ignore-test deployment-has-host-podantiaffinity
--ignore-test container-security-context
--ignore-test pod-probes
--ignore-test container-image-tag
--enable-optional-test container-security-context-privileged
--enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v2
with:
python-version: 3.7
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.1.0
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
if [[ -n "$changed" ]]; then
echo "::set-output name=changed::true"
fi
- name: Run chart-testing (lint)
run: ct lint --config charts/.ci/ct-config.yaml
- name: Create kind cluster
uses: helm/kind-action@v1.2.0
if: steps.list-changed.outputs.changed == 'true'
# We need cert-manager already installed in the cluster because we assume the CRDs exist
- name: Install cert-manager
run: |
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
if: steps.list-changed.outputs.changed == 'true'
- name: Run chart-testing (install)
run: ct install --config charts/.ci/ct-config.yaml
if: steps.list-changed.outputs.changed == 'true'
publish-chart:
runs-on: ubuntu-latest
needs: lint-chart
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.2.1
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"


@@ -7,12 +7,8 @@ jobs:
runs-on: ubuntu-latest
name: Release
env:
DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
DOCKERHUB_USERNAME: ${{ github.repository_owner }}
steps:
- name: Set outputs
id: vars
run: echo ::set-output name=sha_short::${GITHUB_SHA::7}
- name: Checkout
uses: actions/checkout@v2
@@ -47,7 +43,7 @@ jobs:
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_USER }}
username: ${{ github.repository_owner }}
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
- name: Build and Push
@@ -56,8 +52,5 @@ jobs:
file: Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:latest
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}-${{ steps.vars.outputs.sha_short }}
tags: ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}


@@ -6,9 +6,6 @@ on:
- master
paths-ignore:
- 'runner/**'
- .github/workflows/build-and-release-runners.yml
- '*.md'
- '.gitignore'
jobs:
test:
@@ -17,15 +14,11 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: '^1.16.5'
- run: go version
- name: Install kubebuilder
run: |
curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz
tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz
sudo mv kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder
curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.2.0/kubebuilder_2.2.0_linux_amd64.tar.gz
tar zxvf kubebuilder_2.2.0_linux_amd64.tar.gz
sudo mv kubebuilder_2.2.0_linux_amd64 /usr/local/kubebuilder
- name: Run tests
run: make test
- name: Verify manifests are up-to-date


@@ -4,15 +4,13 @@ on:
- master
paths-ignore:
- "runner/**"
- "**.md"
- ".gitignore"
jobs:
build:
runs-on: ubuntu-latest
name: release-latest
env:
DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
DOCKERHUB_USERNAME: ${{ github.repository_owner }}
steps:
- name: Checkout
uses: actions/checkout@v2
@@ -29,16 +27,14 @@ jobs:
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_USER }}
username: ${{ github.repository_owner }}
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
# Considered unstable builds
# See Issue #285, PR #286, and PR #323 for more information
- name: Build and Push
uses: docker/build-push-action@v2
with:
file: Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:canary
tags: ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:latest

.gitignore (vendored, 10 changed lines)

@@ -1,4 +1,3 @@
# Deploy Assets
release
# Binaries for programs and plugins
@@ -16,21 +15,14 @@ bin
*.out
# Kubernetes Generated files - skip generated files, except for vendored files
!vendor/**/zz_generated.*
# editor and IDE paraphernalia
.vscode
.idea
*.swp
*.swo
*~
.envrc
.env
.test.env
*.pem
# OS
.DS_STORE
/test-assets


@@ -1,142 +0,0 @@
## Contributing
### How to Contribute a Patch
How you should go about a patch depends on what you are patching. Below are some guides on how to test patches locally as well as how to develop the controller and runners.
When submitting a PR for a change, please provide evidence that your change works, as we still need to work on improving the project's CI. Some resources are provided to help achieve this; see this guide for details.
#### Running an End to End Test
> **Notes for Ubuntu 20.04+ users**
>
> If you're using Ubuntu 20.04 or greater, you might have installed `docker` with `snap`.
>
> If you want to stick with `snap`-provided `docker`, do not forget to set `TMPDIR` to
> somewhere under `$HOME`.
> Otherwise `kind load docker-image` fails while running `docker save`.
> See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap for more information.
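A minimal sketch of that workaround for snap-provided docker; the directory name is arbitrary:

```shell
# snap-confined docker can only read/write under $HOME, so point TMPDIR there
# before `kind load docker-image` (which shells out to `docker save`).
mkdir -p "$HOME/tmp"
export TMPDIR="$HOME/tmp"
```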
To test your local changes against both PAT- and App-based authentication, please run the `acceptance` make target with the authentication configuration details provided:
```shell
# This sets `VERSION` envvar to some appropriate value
. hack/make-env.sh
DOCKER_USER=*** \
GITHUB_TOKEN=*** \
APP_ID=*** \
PRIVATE_KEY_FILE_PATH=path/to/pem/file \
INSTALLATION_ID=*** \
make acceptance
```
**Rerunning a failed test**
When one of the tests run by `make acceptance` fails, you'll probably want to rerun only the failed one.
You can do so with `make acceptance/run`, setting the combination of `ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm|kubectl` and `ACCEPTANCE_TEST_SECRET_TYPE=token|app` values that failed (note that you only need to set the corresponding authentication configuration in this case).
In the example below, we rerun the test for the combination `ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token` only:
```shell
DOCKER_USER=*** \
GITHUB_TOKEN=*** \
ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm \
ACCEPTANCE_TEST_SECRET_TYPE=token \
make acceptance/run
```
**Testing in a non-kind cluster**
If you prefer to test in a non-kind cluster, you can instead run:
```shell
KUBECONFIG=path/to/kubeconfig \
DOCKER_USER=*** \
GITHUB_TOKEN=*** \
APP_ID=*** \
PRIVATE_KEY_FILE_PATH=path/to/pem/file \
INSTALLATION_ID=*** \
ACCEPTANCE_TEST_SECRET_TYPE=token \
make docker-build acceptance/setup \
acceptance/deploy \
acceptance/tests
```
#### Developing the Controller
Rerunning the whole acceptance test suite from scratch on every little change to the controller, the runner, and the chart would be counter-productive.
To make your development cycle faster, use the command below to rebuild, deploy, and update all three:
```shell
# Let's assume all other envvars like DOCKER_USER and GITHUB_TOKEN are already set.
# The below command will (re)build `actions-runner-controller:controller1` and `actions-runner:runner1`,
# load those into kind nodes, and then rerun kubectl or helm to install/upgrade the controller,
# and finally upgrade the runner deployment to use the new runner image.
#
# As helm 3 and kubectl are unable to recreate a pod when the tag doesn't change,
# you either need to bump VERSION and RUNNER_TAG on each run,
# or manually run `kubectl delete pod $POD` on respective pods for changes to actually take effect.
VERSION=controller1 \
RUNNER_TAG=runner1 \
make acceptance/pull acceptance/kind docker-build acceptance/load acceptance/deploy
```
If you've already deployed actions-runner-controller and only want to recreate pods to use the newer image, you can run:
```shell
NAME=$DOCKER_USER/actions-runner-controller \
make docker-build acceptance/load && \
kubectl -n actions-runner-system delete po $(kubectl -n actions-runner-system get po -ojsonpath={.items[*].metadata.name})
```
Similarly, if you'd like to recreate runner pods with the newer runner image, run:
```shell
NAME=$DOCKER_USER/actions-runner make \
-C runner docker-{build,push}-ubuntu && \
(kubectl get po -ojsonpath={.items[*].metadata.name} | xargs -n1 kubectl delete po)
```
#### Developing the Runners
**Tests**
A set of example pipelines (./acceptance/pipelines) is provided in this repository, which you can use to validate that your runners are working as expected. When raising a PR please run the relevant suites to prove your change hasn't broken anything.
**Running Ginkgo Tests**
You can run the integration test suite that is written in Ginkgo with:
```shell
make test-with-deps
```
This will first install a few binaries required to set up the integration test environment and then run `go test` to start the Ginkgo tests.
If you don't want to use `make`, like when you're running tests from your IDE, install the required binaries to `/usr/local/kubebuilder/bin`. That's the directory where controller-runtime's `envtest` framework looks for the binaries.
```shell
sudo mkdir -p /usr/local/kubebuilder/bin
make kube-apiserver etcd
sudo mv test-assets/{etcd,kube-apiserver} /usr/local/kubebuilder/bin/
go test -v -run TestAPIs github.com/actions-runner-controller/actions-runner-controller/controllers
```
To run Ginkgo tests selectively, set the pattern of target test names in `GINKGO_FOCUS`.
All the Ginkgo tests that match `GINKGO_FOCUS` will be run.
```shell
GINKGO_FOCUS='[It] should create a new Runner resource from the specified template, add a another Runner on replicas increased, and removes all the replicas when set to 0' \
go test -v -run TestAPIs github.com/actions-runner-controller/actions-runner-controller/controllers
```
#### Helm Version Bumps
**Chart Version:** When bumping the chart version, follow semantic versioning: https://semver.org/<br />
**App Version:** When bumping the app version, you will also need to bump the chart version. Again, follow semantic versioning when bumping the chart.
To determine whether you need to bump the MAJOR, MINOR, or PATCH version, review the changes between the previous and new app versions and/or ask a maintainer to advise.
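As a sketch of where those bumps land, both versions live in the chart's `Chart.yaml`; the version numbers below are placeholders, not a real release:

```yaml
# charts/actions-runner-controller/Chart.yaml (excerpt; placeholder versions)
apiVersion: v2
name: actions-runner-controller
version: 0.12.1    # chart version: bump on every chart change, following semver
appVersion: 0.20.2 # app version: bumping this requires a chart version bump too
```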


@@ -1,5 +1,5 @@
# Build the manager binary
FROM golang:1.17 as builder
FROM golang:1.15 as builder
ARG TARGETPLATFORM
@@ -22,8 +22,7 @@ COPY . .
RUN export GOOS=$(echo ${TARGETPLATFORM} | cut -d / -f1) && \
export GOARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) && \
GOARM=$(echo ${TARGETPLATFORM} | cut -d / -f3 | cut -c2-) && \
go build -a -o manager main.go && \
go build -a -o github-webhook-server ./cmd/githubwebhookserver
go build -a -o manager main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
@@ -32,7 +31,6 @@ FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
COPY --from=builder /workspace/github-webhook-server .
USER nonroot:nonroot

Makefile (199 changed lines)

@@ -1,32 +1,11 @@
ifdef DOCKER_USER
NAME ?= ${DOCKER_USER}/actions-runner-controller
else
NAME ?= summerwind/actions-runner-controller
endif
DOCKER_USER ?= $(shell echo ${NAME} | cut -d / -f1)
NAME ?= summerwind/actions-runner-controller
VERSION ?= latest
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
RUNNER_TAG ?= ${VERSION}
TEST_REPO ?= ${DOCKER_USER}/actions-runner-controller
TEST_ORG ?=
TEST_ORG_REPO ?=
TEST_EPHEMERAL ?= false
SYNC_PERIOD ?= 5m
USE_RUNNERSET ?=
RUNNER_FEATURE_FLAG_EPHEMERAL ?=
KUBECONTEXT ?= kind-acceptance
CLUSTER ?= acceptance
CERT_MANAGER_VERSION ?= v1.1.1
# From https://github.com/VictoriaMetrics/operator/pull/44
YAML_DROP=$(YQ) delete --inplace
# If you encounter errors like the one below, you very likely need to update this to follow e.g. a CRD version change:
# CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: spec.preserveUnknownFields: Invalid value: true: must be false in order to use defaults in the schema
YAML_DROP_PREFIX=spec.versions[0].schema.openAPIV3Schema.properties.spec.properties
YAML_DROP_PREFIX=spec.validation.openAPIV3Schema.properties.spec.properties
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:trivialVersions=true,generateEmbeddedObjectMeta=true"
CRD_OPTIONS ?= "crd:trivialVersions=true"
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
@@ -35,8 +14,6 @@ else
GOBIN=$(shell go env GOBIN)
endif
TEST_ASSETS=$(PWD)/test-assets
# default list of platforms for which multiarch image is built
ifeq (${PLATFORMS}, )
export PLATFORMS="linux/amd64,linux/arm64"
@@ -45,8 +22,8 @@ endif
# if IMG_RESULT is unspecified, by default the image will be pushed to registry
ifeq (${IMG_RESULT}, load)
export PUSH_ARG="--load"
# if load is specified, image will be built only for the build machine architecture.
export PLATFORMS="local"
# if load is specified, image will be built only for the build machine architecture.
export PLATFORMS="local"
else ifeq (${IMG_RESULT}, cache)
# if cache is specified, image will only be available in the build cache, it won't be pushed or loaded
# therefore no PUSH_ARG will be specified
@@ -56,18 +33,9 @@ endif
all: manager
GO_TEST_ARGS ?= -short
# Run tests
test: generate fmt vet manifests
go test $(GO_TEST_ARGS) ./... -coverprofile cover.out
test-with-deps: kube-apiserver etcd kubectl
# See https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#pkg-constants
TEST_ASSET_KUBE_APISERVER=$(KUBE_APISERVER_BIN) \
TEST_ASSET_ETCD=$(ETCD_BIN) \
TEST_ASSET_KUBECTL=$(KUBECTL_BIN) \
make test
go test ./... -coverprofile cover.out
# Build manager binary
manager: generate fmt vet
@@ -128,9 +96,12 @@ generate: controller-gen
$(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths="./..."
# Build the docker image
docker-build:
docker-build: test
docker build . -t ${NAME}:${VERSION}
docker build runner -t ${RUNNER_NAME}:${RUNNER_TAG} --build-arg TARGETPLATFORM=$(shell arch)
# Push the docker image
docker-push:
docker push ${NAME}:${VERSION}
docker-buildx:
export DOCKER_CLI_EXPERIMENTAL=enabled
@@ -144,11 +115,6 @@ docker-buildx:
-f Dockerfile \
. ${PUSH_ARG}
# Push the docker image
docker-push:
docker push ${NAME}:${VERSION}
docker push ${RUNNER_NAME}:${RUNNER_TAG}
# Generate the release manifest file
release: manifests
cd config/manager && kustomize edit set image controller=${NAME}:${VERSION}
@@ -160,41 +126,19 @@ release/clean:
rm -rf release
.PHONY: acceptance
acceptance: release/clean acceptance/pull docker-build release
ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/run
ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/run
ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/run
ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/run
acceptance/run: acceptance/kind acceptance/load acceptance/setup acceptance/deploy acceptance/tests acceptance/teardown
acceptance: release/clean docker-build docker-push release
ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
acceptance/kind:
kind create cluster --name ${CLUSTER} --config acceptance/kind.yaml
# Set TMPDIR to somewhere under $HOME when you use docker installed with Ubuntu snap
# Otherwise `load docker-image` fail while running `docker save`.
# See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap
acceptance/load:
kind load docker-image ${NAME}:${VERSION} --name ${CLUSTER}
kind load docker-image quay.io/brancz/kube-rbac-proxy:v0.10.0 --name ${CLUSTER}
kind load docker-image ${RUNNER_NAME}:${RUNNER_TAG} --name ${CLUSTER}
kind load docker-image docker:dind --name ${CLUSTER}
kind load docker-image quay.io/jetstack/cert-manager-controller:$(CERT_MANAGER_VERSION) --name ${CLUSTER}
kind load docker-image quay.io/jetstack/cert-manager-cainjector:$(CERT_MANAGER_VERSION) --name ${CLUSTER}
kind load docker-image quay.io/jetstack/cert-manager-webhook:$(CERT_MANAGER_VERSION) --name ${CLUSTER}
kubectl cluster-info --context ${KUBECONTEXT}
# Pull the docker images for acceptance
acceptance/pull:
docker pull quay.io/brancz/kube-rbac-proxy:v0.10.0
docker pull docker:dind
docker pull quay.io/jetstack/cert-manager-controller:$(CERT_MANAGER_VERSION)
docker pull quay.io/jetstack/cert-manager-cainjector:$(CERT_MANAGER_VERSION)
docker pull quay.io/jetstack/cert-manager-webhook:$(CERT_MANAGER_VERSION)
kind create cluster --name acceptance
kubectl cluster-info --context kind-acceptance
acceptance/setup:
kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/$(CERT_MANAGER_VERSION)/cert-manager.yaml #kubectl create namespace actions-runner-system
kubectl -n cert-manager wait deploy/cert-manager-cainjector --for condition=available --timeout 90s
kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.yaml #kubectl create namespace actions-runner-system
kubectl -n cert-manager wait deploy/cert-manager-cainjector --for condition=available --timeout 60s
kubectl -n cert-manager wait deploy/cert-manager-webhook --for condition=available --timeout 60s
kubectl -n cert-manager wait deploy/cert-manager --for condition=available --timeout 60s
kubectl create namespace actions-runner-system || true
@@ -202,37 +146,18 @@ acceptance/setup:
sleep 5
acceptance/teardown:
kind delete cluster --name ${CLUSTER}
acceptance/deploy:
NAME=${NAME} DOCKER_USER=${DOCKER_USER} VERSION=${VERSION} RUNNER_NAME=${RUNNER_NAME} RUNNER_TAG=${RUNNER_TAG} TEST_REPO=${TEST_REPO} \
TEST_ORG=${TEST_ORG} TEST_ORG_REPO=${TEST_ORG_REPO} SYNC_PERIOD=${SYNC_PERIOD} \
USE_RUNNERSET=${USE_RUNNERSET} \
TEST_EPHEMERAL=${TEST_EPHEMERAL} \
RUNNER_FEATURE_FLAG_EPHEMERAL=${RUNNER_FEATURE_FLAG_EPHEMERAL} \
acceptance/deploy.sh
kind delete cluster --name acceptance
acceptance/tests:
acceptance/deploy.sh
acceptance/checks.sh
# We use -count=1 instead of `go clean -testcache`
# See https://terratest.gruntwork.io/docs/testing-best-practices/avoid-test-caching/
.PHONY: e2e
e2e:
go test -count=1 -v -timeout 600s -run '^TestE2E$$' ./test/e2e
# Upload release file to GitHub.
github-release: release
ghr ${VERSION} release/
# Find or download controller-gen
#
# Note that controller-gen newer than 0.4.1 is needed for https://github.com/kubernetes-sigs/controller-tools/issues/444#issuecomment-680168439
# Otherwise we get errors like the below:
# Error: failed to install CRD crds/actions.summerwind.dev_runnersets.yaml: CustomResourceDefinition.apiextensions.k8s.io "runnersets.actions.summerwind.dev" is invalid: [spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[containers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property, spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[initContainers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property]
#
# Note that controller-gen newer than 0.6.0 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
# Otherwise ObjectMeta embedded in Spec ends up empty in storage.
# find or download controller-gen
# download controller-gen if necessary
controller-gen:
ifeq (, $(shell which controller-gen))
ifeq (, $(wildcard $(GOBIN)/controller-gen))
@@ -241,7 +166,7 @@ ifeq (, $(wildcard $(GOBIN)/controller-gen))
CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\
cd $$CONTROLLER_GEN_TMP_DIR ;\
go mod init tmp ;\
go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.6.0 ;\
go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.3.0 ;\
rm -rf $$CONTROLLER_GEN_TMP_DIR ;\
}
endif
@@ -266,77 +191,3 @@ ifeq (, $(wildcard $(GOBIN)/yq))
}
endif
YQ=$(GOBIN)/yq
OS_NAME := $(shell uname -s | tr A-Z a-z)
# find or download etcd
etcd:
ifeq (, $(shell which etcd))
ifeq (, $(wildcard $(TEST_ASSETS)/etcd))
@{ \
set -xe ;\
INSTALL_TMP_DIR=$$(mktemp -d) ;\
cd $$INSTALL_TMP_DIR ;\
wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
mkdir -p $(TEST_ASSETS) ;\
tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
rm -rf $$INSTALL_TMP_DIR ;\
}
ETCD_BIN=$(TEST_ASSETS)/etcd
else
ETCD_BIN=$(TEST_ASSETS)/etcd
endif
else
ETCD_BIN=$(shell which etcd)
endif
# find or download kube-apiserver
kube-apiserver:
ifeq (, $(shell which kube-apiserver))
ifeq (, $(wildcard $(TEST_ASSETS)/kube-apiserver))
@{ \
set -xe ;\
INSTALL_TMP_DIR=$$(mktemp -d) ;\
cd $$INSTALL_TMP_DIR ;\
wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
mkdir -p $(TEST_ASSETS) ;\
tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
rm -rf $$INSTALL_TMP_DIR ;\
}
KUBE_APISERVER_BIN=$(TEST_ASSETS)/kube-apiserver
else
KUBE_APISERVER_BIN=$(TEST_ASSETS)/kube-apiserver
endif
else
KUBE_APISERVER_BIN=$(shell which kube-apiserver)
endif
# find or download kubectl
kubectl:
ifeq (, $(shell which kubectl))
ifeq (, $(wildcard $(TEST_ASSETS)/kubectl))
@{ \
set -xe ;\
INSTALL_TMP_DIR=$$(mktemp -d) ;\
cd $$INSTALL_TMP_DIR ;\
wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
mkdir -p $(TEST_ASSETS) ;\
tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
rm -rf $$INSTALL_TMP_DIR ;\
}
KUBECTL_BIN=$(TEST_ASSETS)/kubectl
else
KUBECTL_BIN=$(TEST_ASSETS)/kubectl
endif
else
KUBECTL_BIN=$(shell which kubectl)
endif


@@ -1,5 +1,5 @@
domain: summerwind.dev
repo: github.com/actions-runner-controller/actions-runner-controller
repo: github.com/summerwind/actions-runner-controller
resources:
- group: actions
kind: Runner

README.md (966 changed lines)

File diff suppressed because it is too large.


@@ -1,84 +1,29 @@
#!/usr/bin/env bash
set +e
set -e
repo_runnerdeployment_passed="skipped"
repo_runnerset_passed="skipped"
runner_name=
echo "Checking if RunnerDeployment repo test is set"
if [ "${TEST_REPO}" ] && [ ! "${USE_RUNNERSET}" ]; then
runner_name=
count=0
while [ $count -le 30 ]; do
echo "Finding Runner ..."
runner_name=$(kubectl get runner --output=jsonpath="{.items[*].metadata.name}")
if [ "${runner_name}" ]; then
while [ $count -le 30 ]; do
runner_pod_name=
echo "Found Runner \""${runner_name}"\""
echo "Finding underlying pod ..."
runner_pod_name=$(kubectl get pod --output=jsonpath="{.items[*].metadata.name}" | grep ${runner_name})
if [ "${runner_pod_name}" ]; then
echo "Found underlying pod \""${runner_pod_name}"\""
echo "Waiting for pod \""${runner_pod_name}"\" to become ready..."
kubectl wait pod/${runner_pod_name} --for condition=ready --timeout 270s
break 2
fi
sleep 1
let "count=count+1"
done
fi
sleep 1
let "count=count+1"
done
if [ $count -ge 30 ]; then
repo_runnerdeployment_passed=false
else
repo_runnerdeployment_passed=true
fi
echo "Checking if RunnerSet repo test is set"
elif [ "${TEST_REPO}" ] && [ "${USE_RUNNERSET}" ]; then
runnerset_name=
count=0
while [ $count -le 30 ]; do
echo "Finding RunnerSet ..."
runnerset_name=$(kubectl get runnerset --output=jsonpath="{.items[*].metadata.name}")
if [ "${runnerset_name}" ]; then
while [ $count -le 30 ]; do
runnerset_pod_name=
echo "Found RunnerSet \""${runnerset_name}"\""
echo "Finding underlying pod ..."
runnerset_pod_name=$(kubectl get pod --output=jsonpath="{.items[*].metadata.name}" | grep ${runnerset_name})
echo "BEFORE IF"
if [ "${runnerset_pod_name}" ]; then
echo "AFTER IF"
echo "Found underlying pod \""${runnerset_pod_name}"\""
echo "Waiting for pod \""${runnerset_pod_name}"\" to become ready..."
kubectl wait pod/${runnerset_pod_name} --for condition=ready --timeout 270s
break 2
fi
sleep 1
let "count=count+1"
done
fi
sleep 1
let "count=count+1"
done
if [ $count -ge 30 ]; then
repo_runnerset_passed=false
else
repo_runnerset_passed=true
fi
fi
while [ -z "${runner_name}" ]; do
echo Finding the runner... 1>&2
sleep 1
runner_name=$(kubectl get runner --output=jsonpath="{.items[*].metadata.name}")
done
if [ ${repo_runnerset_passed} == true ] || [ ${repo_runnerset_passed} == "skipped" ] && \
[ ${repo_runnerdeployment_passed} == true ] || [ ${repo_runnerdeployment_passed} == "skipped" ]; then
echo "INFO : All tests passed or skipped"
echo "RunnerSet Repo Test Status : ${repo_runnerset_passed}"
echo "RunnerDeployment Repo Test Status : ${repo_runnerdeployment_passed}"
else
echo "ERROR : Some tests failed"
echo "RunnerSet Repo Test Status : ${repo_runnerset_passed}"
echo "RunnerDeployment Repo Test Status : ${repo_runnerdeployment_passed}"
exit 1
fi
echo Found runner ${runner_name}.
pod_name=
while [ -z "${pod_name}" ]; do
echo Finding the runner pod... 1>&2
sleep 1
pod_name=$(kubectl get pod --output=jsonpath="{.items[*].metadata.name}" | grep ${runner_name})
done
echo Found pod ${pod_name}.
echo Waiting for pod ${runner_name} to become ready... 1>&2
kubectl wait pod/${runner_name} --for condition=ready --timeout 180s
echo All tests passed. 1>&2


@@ -4,14 +4,10 @@ set -e
tpe=${ACCEPTANCE_TEST_SECRET_TYPE}
VALUES_FILE=${VALUES_FILE:-$(dirname $0)/values.yaml}
if [ "${tpe}" == "token" ]; then
if ! kubectl get secret controller-manager -n actions-runner-system >/dev/null; then
kubectl create secret generic controller-manager \
-n actions-runner-system \
--from-literal=github_token=${GITHUB_TOKEN:?GITHUB_TOKEN must not be empty}
fi
kubectl create secret generic controller-manager \
-n actions-runner-system \
--from-literal=github_token=${GITHUB_TOKEN:?GITHUB_TOKEN must not be empty}
elif [ "${tpe}" == "app" ]; then
kubectl create secret generic controller-manager \
-n actions-runner-system \
@@ -30,46 +26,17 @@ if [ "${tool}" == "helm" ]; then
charts/actions-runner-controller \
-n actions-runner-system \
--create-namespace \
--set syncPeriod=${SYNC_PERIOD} \
--set authSecret.create=false \
--set image.repository=${NAME} \
--set image.tag=${VERSION} \
-f ${VALUES_FILE}
kubectl apply -f charts/actions-runner-controller/crds
kubectl -n actions-runner-system wait deploy/actions-runner-controller --for condition=available --timeout 60s
--set syncPeriod=5m
kubectl -n actions-runner-system wait deploy/actions-runner-controller --for condition=available
else
kubectl apply \
-n actions-runner-system \
-f release/actions-runner-controller.yaml
kubectl -n actions-runner-system wait deploy/controller-manager --for condition=available --timeout 120s
kubectl -n actions-runner-system wait deploy/controller-manager --for condition=available --timeout 60s
fi
# Ad-hoc wait for some time until actions-runner-controller's admission webhook gets ready
sleep 20
RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}
if [ -n "${TEST_REPO}" ]; then
if [ -n "USE_RUNNERSET" ]; then
cat acceptance/testdata/repo.runnerset.yaml | envsubst | kubectl apply -f -
cat acceptance/testdata/repo.runnerset.hra.yaml | envsubst | kubectl apply -f -
else
echo 'Deploying runnerdeployment and hra. Set USE_RUNNERSET if you want to deploy runnerset instead.'
cat acceptance/testdata/repo.runnerdeploy.yaml | envsubst | kubectl apply -f -
cat acceptance/testdata/repo.hra.yaml | envsubst | kubectl apply -f -
fi
else
echo 'Skipped deploying runnerdeployment and hra. Set TEST_REPO to "yourorg/yourrepo" to deploy.'
fi
if [ -n "${TEST_ORG}" ]; then
cat acceptance/testdata/org.runnerdeploy.yaml | envsubst | kubectl apply -f -
if [ -n "${TEST_ORG_REPO}" ]; then
cat acceptance/testdata/org.hra.yaml | envsubst | kubectl apply -f -
else
echo 'Skipped deploying organizational hra. Set TEST_ORG_REPO to "yourorg/yourrepo" to deploy.'
fi
else
echo 'Skipped deploying organizational runnerdeployment. Set TEST_ORG to deploy.'
fi
kubectl apply \
-f acceptance/testdata/runnerdeploy.yaml


@@ -1,10 +0,0 @@
apiVersion: kind.x-k8s.io/v1alpha4
kind: Cluster
nodes:
- role: control-plane
extraPortMappings:
- containerPort: 31000
hostPort: 31000
listenAddress: "0.0.0.0"
protocol: tcp
#- role: worker


@@ -1,36 +0,0 @@
name: EKS Integration Tests
on:
workflow_dispatch:
env:
IRSA_ROLE_ARN:
ASSUME_ROLE_ARN:
AWS_REGION:
jobs:
assume-role-in-runner-test:
runs-on: ['self-hosted', 'Linux']
steps:
- name: Test aws-actions/configure-aws-credentials Action
uses: aws-actions/configure-aws-credentials@v1
with:
aws-region: ${{ env.AWS_REGION }}
role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
role-duration-seconds: 900
assume-role-in-container-test:
runs-on: ['self-hosted', 'Linux']
container:
image: amazon/aws-cli
env:
AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
AWS_ROLE_ARN: ${{ env.IRSA_ROLE_ARN }}
volumes:
- /var/run/secrets/eks.amazonaws.com/serviceaccount/token:/var/run/secrets/eks.amazonaws.com/serviceaccount/token
steps:
- name: Test aws-actions/configure-aws-credentials Action in container
uses: aws-actions/configure-aws-credentials@v1
with:
aws-region: ${{ env.AWS_REGION }}
role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
role-duration-seconds: 900


@@ -1,83 +0,0 @@
name: Runner Integration Tests
on:
workflow_dispatch:
env:
ImageOS: ubuntu18 # Used by ruby/setup-ruby action | Update me for the runner OS version you are testing against
jobs:
run-step-in-container-test:
runs-on: ['self-hosted', 'Linux']
container:
image: alpine
steps:
- name: Test we are working in the container
run: |
if [[ $(sed -n '2p' < /etc/os-release | cut -d "=" -f2) != "alpine" ]]; then
echo "::error ::Failed OS detection test, could not match /etc/os-release with alpine. Are we really running in the container?"
echo "/etc/os-release below:"
cat /etc/os-release
exit 1
fi
setup-python-test:
runs-on: ['self-hosted', 'Linux']
steps:
- name: Print native Python environment
run: |
which python
python --version
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Test actions/setup-python works
run: |
VERSION=$(python --version 2>&1 | cut -d ' ' -f2 | cut -d '.' -f1-2)
if [[ $VERSION != '3.9' ]]; then
echo "Python version detected : $(python --version 2>&1)"
echo "::error ::Detected python failed setup version test, could not match version with version specified in the setup action"
exit 1
else
echo "Python version detected : $(python --version 2>&1)"
fi
setup-node-test:
runs-on: ['self-hosted', 'Linux']
steps:
- uses: actions/setup-node@v2
with:
node-version: '12'
- name: Test actions/setup-node works
run: |
VERSION=$(node --version | cut -c 2- | cut -d '.' -f1)
if [[ $VERSION != '12' ]]; then
echo "Node version detected : $(node --version 2>&1)"
echo "::error ::Detected node failed setup version test, could not match version with version specified in the setup action"
exit 1
else
echo "Node version detected : $(node --version 2>&1)"
fi
setup-ruby-test:
runs-on: ['self-hosted', 'Linux']
steps:
- uses: ruby/setup-ruby@v1
with:
ruby-version: 3.0
bundler-cache: true
- name: Test ruby/setup-ruby works
run: |
VERSION=$(ruby --version | cut -d ' ' -f2 | cut -d '.' -f1-2)
if [[ $VERSION != '3.0' ]]; then
echo "Ruby version detected : $(ruby --version 2>&1)"
echo "::error ::Detected ruby failed setup version test, could not match version with version specified in the setup action"
exit 1
else
echo "Ruby version detected : $(ruby --version 2>&1)"
fi
python-shell-test:
runs-on: ['self-hosted', 'Linux']
steps:
- name: Test Python shell works
run: |
import os
print(os.environ['PATH'])
shell: python


@@ -1,36 +0,0 @@
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
name: org
spec:
scaleTargetRef:
name: org-runnerdeploy
scaleUpTriggers:
- githubEvent:
checkRun:
types: ["created"]
status: "queued"
amount: 1
duration: "1m"
scheduledOverrides:
- startTime: "2021-05-11T16:05:00+09:00"
endTime: "2021-05-11T16:40:00+09:00"
minReplicas: 2
- startTime: "2021-05-01T00:00:00+09:00"
endTime: "2021-05-03T00:00:00+09:00"
recurrenceRule:
frequency: Weekly
untilTime: "2022-05-01T00:00:00+09:00"
minReplicas: 0
minReplicas: 0
maxReplicas: 5
# Used to test that HRA is working for org runners
metrics:
- type: PercentageRunnersBusy
scaleUpThreshold: '0.75'
scaleDownThreshold: '0.3'
scaleUpFactor: '2'
scaleDownFactor: '0.5'
- type: TotalNumberOfQueuedAndInProgressWorkflowRuns
repositoryNames:
- ${TEST_ORG_REPO}


@@ -1,37 +0,0 @@
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
name: org-runnerdeploy
spec:
# replicas: 1
template:
spec:
organization: ${TEST_ORG}
#
# Custom runner image
#
image: ${RUNNER_NAME}:${RUNNER_TAG}
imagePullPolicy: IfNotPresent
#
# dockerd within runner container
#
## Replace `mumoshu/actions-runner-dind:dev` with your dind image
#dockerdWithinRunnerContainer: true
#image: mumoshu/actions-runner-dind:dev
#
# Set the MTU used by dockerd-managed network interfaces (including docker-build-ubuntu)
#
#dockerMTU: 1450
#Runner group
# labels:
# - "mylabel 1"
# - "mylabel 2"
#
# Non-standard working directory
#
# workDir: "/"


@@ -1,25 +0,0 @@
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
name: actions-runner-aos-autoscaler
spec:
scaleTargetRef:
name: example-runnerdeploy
scaleUpTriggers:
- githubEvent:
checkRun:
types: ["created"]
status: "queued"
amount: 1
duration: "1m"
minReplicas: 0
maxReplicas: 5
metrics:
- type: PercentageRunnersBusy
scaleUpThreshold: '0.75'
scaleDownThreshold: '0.3'
scaleUpFactor: '2'
scaleDownFactor: '0.5'
- type: TotalNumberOfQueuedAndInProgressWorkflowRuns
repositoryNames:
- ${TEST_REPO}


@@ -1,37 +0,0 @@
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
name: example-runnerdeploy
spec:
# replicas: 1
template:
spec:
repository: ${TEST_REPO}
#
# Custom runner image
#
image: ${RUNNER_NAME}:${RUNNER_TAG}
imagePullPolicy: IfNotPresent
#
# dockerd within runner container
#
## Replace `mumoshu/actions-runner-dind:dev` with your dind image
#dockerdWithinRunnerContainer: true
#image: mumoshu/actions-runner-dind:dev
#
# Set the MTU used by dockerd-managed network interfaces (including docker-build-ubuntu)
#
#dockerMTU: 1450
#Runner group
# labels:
# - "mylabel 1"
# - "mylabel 2"
#
# Non-standard working directory
#
# workDir: "/"


@@ -1,29 +0,0 @@
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
name: example-runnerset
spec:
scaleTargetRef:
kind: RunnerSet
name: example-runnerset
scaleUpTriggers:
- githubEvent:
checkRun:
types: ["created"]
status: "queued"
amount: 1
duration: "1m"
# RunnerSet doesn't support scale from/to zero yet
minReplicas: 1
maxReplicas: 5
# This should be less than 600 (seconds, the default) for faster testing
scaleDownDelaySecondsAfterScaleOut: 60
metrics:
- type: PercentageRunnersBusy
scaleUpThreshold: '0.75'
scaleDownThreshold: '0.3'
scaleUpFactor: '2'
scaleDownFactor: '0.5'
- type: TotalNumberOfQueuedAndInProgressWorkflowRuns
repositoryNames:
- ${TEST_REPO}


@@ -1,59 +0,0 @@
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerSet
metadata:
name: example-runnerset
spec:
# MANDATORY because it is based on StatefulSet: results in the error below when omitted:
# missing required field "selector" in dev.summerwind.actions.v1alpha1.RunnerSet.spec
selector:
matchLabels:
app: example-runnerset
# MANDATORY because it is based on StatefulSet: results in the error below when omitted:
# missing required field "serviceName" in dev.summerwind.actions.v1alpha1.RunnerSet.spec
serviceName: example-runnerset
#replicas: 1
# From my limited testing, `ephemeral: true` is more reliable.
# Sometimes, updating already deployed runners from `ephemeral: false` to `ephemeral: true` seems to
# result in queued jobs hanging forever.
ephemeral: ${TEST_EPHEMERAL}
repository: ${TEST_REPO}
#
# Custom runner image
#
image: ${RUNNER_NAME}:${RUNNER_TAG}
#
# dockerd within runner container
#
## Replace `mumoshu/actions-runner-dind:dev` with your dind image
#dockerdWithinRunnerContainer: true
#
# Set the MTU used by dockerd-managed network interfaces (including docker-build-ubuntu)
#
#dockerMTU: 1450
#Runner group
# labels:
# - "mylabel 1"
# - "mylabel 2"
labels:
- "${RUNNER_LABEL}"
#
# Non-standard working directory
#
# workDir: "/"
template:
metadata:
labels:
app: example-runnerset
spec:
containers:
- name: runner
imagePullPolicy: IfNotPresent
env:
- name: RUNNER_FEATURE_FLAG_EPHEMERAL
value: "${RUNNER_FEATURE_FLAG_EPHEMERAL}"
#- name: docker
# #image: mumoshu/actions-runner-dind:dev

acceptance/testdata/runnerdeploy.yaml vendored Normal file

@@ -0,0 +1,9 @@
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
name: example-runnerdeploy
spec:
# replicas: 1
template:
spec:
repository: mumoshu/actions-runner-controller-ci


@@ -1,20 +0,0 @@
# Set actions-runner-controller settings for testing
githubAPICacheDuration: 10s
githubWebhookServer:
enabled: true
labels: {}
replicaCount: 1
syncPeriod: 10m
secret:
create: true
name: "github-webhook-server"
### GitHub Webhook Configuration
#github_webhook_secret_token: ""
service:
type: NodePort
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
nodePort: 31000


@@ -41,81 +41,9 @@ type HorizontalRunnerAutoscalerSpec struct {
// Metrics is the collection of various metric targets to calculate the desired number of runners
// +optional
Metrics []MetricSpec `json:"metrics,omitempty"`
// ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
// on each webhook request received by the webhookBasedAutoscaler.
//
// This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
//
// Note that the added runners remain until the next sync period at least,
// and they may or may not be used by GitHub Actions depending on the timing.
// They are intended to be used to gain "resource slack" immediately after you
// receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
ScaleUpTriggers []ScaleUpTrigger `json:"scaleUpTriggers,omitempty"`
CapacityReservations []CapacityReservation `json:"capacityReservations,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
// ScheduledOverrides is the list of ScheduledOverride.
// It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
// The earlier a scheduled override appears in the list, the higher it is prioritized.
// +optional
ScheduledOverrides []ScheduledOverride `json:"scheduledOverrides,omitempty"`
}
type ScaleUpTrigger struct {
GitHubEvent *GitHubEventScaleUpTriggerSpec `json:"githubEvent,omitempty"`
Amount int `json:"amount,omitempty"`
Duration metav1.Duration `json:"duration,omitempty"`
}
type GitHubEventScaleUpTriggerSpec struct {
CheckRun *CheckRunSpec `json:"checkRun,omitempty"`
PullRequest *PullRequestSpec `json:"pullRequest,omitempty"`
Push *PushSpec `json:"push,omitempty"`
}
// https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
type CheckRunSpec struct {
Types []string `json:"types,omitempty"`
Status string `json:"status,omitempty"`
// Names is a list of GitHub Actions glob patterns.
// Any check_run event whose name matches one of the patterns in the list can trigger autoscaling.
// Note that the check_run name seems to equal the job name you've defined in your Actions workflow YAML file.
// So it is very likely that you can use this to trigger scaling depending on the job.
Names []string `json:"names,omitempty"`
// Repositories is a list of GitHub repositories.
// Any check_run event whose repository matches one of the repositories in the list can trigger autoscaling.
Repositories []string `json:"repositories,omitempty"`
}
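For reference, a scale-up trigger using these filters might be sketched as follows in a HorizontalRunnerAutoscaler spec; the job name `build` and the repository `myorg/myrepo` are hypothetical placeholders:

scaleUpTriggers:
- githubEvent:
    checkRun:
      types: ["created"]
      status: "queued"
      # Only check_run events whose name matches one of these patterns trigger scale-up
      names: ["build"]
      # Only check_run events from these repositories trigger scale-up
      repositories: ["myorg/myrepo"]
  amount: 1
  duration: "5m"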
// https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
type PullRequestSpec struct {
Types []string `json:"types,omitempty"`
Branches []string `json:"branches,omitempty"`
}
// PushSpec is the condition for triggering scale-up on push event
// Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type PushSpec struct {
}
// CapacityReservation specifies the number of replicas temporarily added
// to the scale target until ExpirationTime.
type CapacityReservation struct {
Name string `json:"name,omitempty"`
ExpirationTime metav1.Time `json:"expirationTime,omitempty"`
Replicas int `json:"replicas,omitempty"`
}
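Capacity reservations are normally maintained by the webhook-based autoscaler itself rather than written by hand, but going by the JSON tags above, a reservation would be shaped roughly like this (the name, expiration time, and replica count are illustrative):

capacityReservations:
- name: example-reservation   # hypothetical name
  expirationTime: "2021-05-11T17:00:00+09:00"
  replicas: 1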
type ScaleTargetRef struct {
// Kind is the type of resource being referenced
// +optional
// +kubebuilder:validation:Enum=RunnerDeployment;RunnerSet
Kind string `json:"kind,omitempty"`
// Name is the name of resource being referenced
Name string `json:"name,omitempty"`
}
@@ -148,50 +76,6 @@ type MetricSpec struct {
// to determine how many pods should be removed.
// +optional
ScaleDownFactor string `json:"scaleDownFactor,omitempty"`
// ScaleUpAdjustment is the number of runners added on scale-up.
// You can only specify either ScaleUpFactor or ScaleUpAdjustment.
// +optional
ScaleUpAdjustment int `json:"scaleUpAdjustment,omitempty"`
// ScaleDownAdjustment is the number of runners removed on scale-down.
// You can only specify either ScaleDownFactor or ScaleDownAdjustment.
// +optional
ScaleDownAdjustment int `json:"scaleDownAdjustment,omitempty"`
}
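As a sketch, the fixed-step variant of a PercentageRunnersBusy metric could look like the following; the thresholds mirror the examples earlier in this diff, and note that the *Adjustment fields cannot be combined with the corresponding *Factor fields:

metrics:
- type: PercentageRunnersBusy
  scaleUpThreshold: '0.75'
  scaleDownThreshold: '0.3'
  # Add or remove a fixed number of runners instead of multiplying by a factor
  scaleUpAdjustment: 2
  scaleDownAdjustment: 1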
// ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
// A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
type ScheduledOverride struct {
// StartTime is the time at which the first override starts.
StartTime metav1.Time `json:"startTime"`
// EndTime is the time at which the first override ends.
EndTime metav1.Time `json:"endTime"`
// MinReplicas is the number of runners while overriding.
// If omitted, it doesn't override minReplicas.
// +optional
// +nullable
// +kubebuilder:validation:Minimum=0
MinReplicas *int `json:"minReplicas,omitempty"`
// +optional
RecurrenceRule RecurrenceRule `json:"recurrenceRule,omitempty"`
}
type RecurrenceRule struct {
// Frequency is the name of a predefined interval of each recurrence.
// The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
// If empty, the corresponding override happens only once.
// +optional
// +kubebuilder:validation:Enum=Daily;Weekly;Monthly;Yearly
Frequency string `json:"frequency,omitempty"`
// UntilTime is the time of the final recurrence.
// If empty, the schedule recurs forever.
// +optional
UntilTime metav1.Time `json:"untilTime,omitempty"`
}
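Putting ScheduledOverride and RecurrenceRule together, a recurring nightly scale-to-zero window might be sketched as below (all timestamps are illustrative; omitting untilTime would make the override recur forever):

scheduledOverrides:
- startTime: "2021-05-01T22:00:00+09:00"
  endTime: "2021-05-02T06:00:00+09:00"
  recurrenceRule:
    frequency: Daily
    untilTime: "2022-05-01T00:00:00+09:00"
  minReplicas: 0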
type HorizontalRunnerAutoscalerStatus struct {
@@ -206,33 +90,14 @@ type HorizontalRunnerAutoscalerStatus struct {
DesiredReplicas *int `json:"desiredReplicas,omitempty"`
// +optional
// +nullable
LastSuccessfulScaleOutTime *metav1.Time `json:"lastSuccessfulScaleOutTime,omitempty"`
// +optional
CacheEntries []CacheEntry `json:"cacheEntries,omitempty"`
// ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
// for observability.
// +optional
ScheduledOverridesSummary *string `json:"scheduledOverridesSummary,omitempty"`
}
const CacheEntryKeyDesiredReplicas = "desiredReplicas"
type CacheEntry struct {
Key string `json:"key,omitempty"`
Value int `json:"value,omitempty"`
ExpirationTime metav1.Time `json:"expirationTime,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=hra
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.minReplicas",name=Min,type=number
// +kubebuilder:printcolumn:JSONPath=".spec.maxReplicas",name=Max,type=number
// +kubebuilder:printcolumn:JSONPath=".status.desiredReplicas",name=Desired,type=number
// +kubebuilder:printcolumn:JSONPath=".status.scheduledOverridesSummary",name=Schedule,type=string
// HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
type HorizontalRunnerAutoscaler struct {


@@ -19,23 +19,12 @@ package v1alpha1
import (
"errors"
"k8s.io/apimachinery/pkg/api/resource"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// RunnerSpec defines the desired state of Runner
type RunnerSpec struct {
RunnerConfig `json:",inline"`
RunnerPodSpec `json:",inline"`
}
type RunnerConfig struct {
// +optional
// +kubebuilder:validation:Pattern=`^[^/]+$`
Enterprise string `json:"enterprise,omitempty"`
// +optional
// +kubebuilder:validation:Pattern=`^[^/]+$`
Organization string `json:"organization,omitempty"`
@@ -51,123 +40,64 @@ type RunnerConfig struct {
Group string `json:"group,omitempty"`
// +optional
Ephemeral *bool `json:"ephemeral,omitempty"`
// +optional
Image string `json:"image"`
// +optional
WorkDir string `json:"workDir,omitempty"`
// +optional
DockerdWithinRunnerContainer *bool `json:"dockerdWithinRunnerContainer,omitempty"`
// +optional
DockerEnabled *bool `json:"dockerEnabled,omitempty"`
// +optional
DockerMTU *int64 `json:"dockerMTU,omitempty"`
// +optional
DockerRegistryMirror *string `json:"dockerRegistryMirror,omitempty"`
// +optional
VolumeSizeLimit *resource.Quantity `json:"volumeSizeLimit,omitempty"`
// +optional
VolumeStorageMedium *string `json:"volumeStorageMedium,omitempty"`
}
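A RunnerDeployment exercising several of these RunnerConfig fields might be sketched as follows; the organization name and registry mirror URL are placeholders:

apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: tuned-runnerdeploy   # hypothetical name
spec:
  template:
    spec:
      organization: myorg                              # placeholder
      ephemeral: true
      dockerMTU: 1450
      dockerRegistryMirror: https://mirror.example.com # placeholder
      volumeSizeLimit: 4Gi
      volumeStorageMedium: Memory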
// RunnerPodSpec defines the desired pod spec fields of the runner pod
type RunnerPodSpec struct {
Containers []corev1.Container `json:"containers,omitempty"`
// +optional
DockerdContainerResources corev1.ResourceRequirements `json:"dockerdContainerResources,omitempty"`
// +optional
DockerVolumeMounts []corev1.VolumeMount `json:"dockerVolumeMounts,omitempty"`
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
// +optional
Containers []corev1.Container `json:"containers,omitempty"`
// +optional
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
// +optional
Env []corev1.EnvVar `json:"env,omitempty"`
VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
// +optional
EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"`
// +optional
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
Image string `json:"image"`
// +optional
VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
// +optional
Env []corev1.EnvVar `json:"env,omitempty"`
// +optional
Volumes []corev1.Volume `json:"volumes,omitempty"`
// +optional
EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"`
WorkDir string `json:"workDir,omitempty"`
// +optional
InitContainers []corev1.Container `json:"initContainers,omitempty"`
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// +optional
ServiceAccountName string `json:"serviceAccountName,omitempty"`
// +optional
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
// +optional
SidecarContainers []corev1.Container `json:"sidecarContainers,omitempty"`
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// +optional
ServiceAccountName string `json:"serviceAccountName,omitempty"`
// +optional
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
// +optional
SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
// +optional
ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
// +optional
Affinity *corev1.Affinity `json:"affinity,omitempty"`
// +optional
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
// +optional
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
// +optional
EphemeralContainers []corev1.EphemeralContainer `json:"ephemeralContainers,omitempty"`
// +optional
HostAliases []corev1.HostAlias `json:"hostAliases,omitempty"`
// RuntimeClassName is the container runtime configuration that containers should run under.
// More info: https://kubernetes.io/docs/concepts/containers/runtime-class
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
// +optional
RuntimeClassName *string `json:"runtimeClassName,omitempty"`
DockerdWithinRunnerContainer *bool `json:"dockerdWithinRunnerContainer,omitempty"`
// +optional
DnsConfig []corev1.PodDNSConfig `json:"dnsConfig,omitempty"`
DockerEnabled *bool `json:"dockerEnabled,omitempty"`
}
// ValidateRepository validates repository field.
func (rs *RunnerSpec) ValidateRepository() error {
// Enterprise, Organization and Repository are mutually exclusive.
foundCount := 0
if len(rs.Organization) > 0 {
foundCount += 1
// Organization and Repository are mutually exclusive.
if len(rs.Organization) == 0 && len(rs.Repository) == 0 {
return errors.New("Spec needs organization or repository")
}
if len(rs.Repository) > 0 {
foundCount += 1
}
if len(rs.Enterprise) > 0 {
foundCount += 1
}
if foundCount == 0 {
return errors.New("Spec needs enterprise, organization or repository")
}
if foundCount > 1 {
return errors.New("Spec cannot have many fields defined enterprise, organization and repository")
if len(rs.Organization) > 0 && len(rs.Repository) > 0 {
return errors.New("Spec cannot have both organization and repository")
}
return nil
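Whichever variant of the validation applies, a minimal repository-scoped Runner satisfies it; a sketch (all names are placeholders):

apiVersion: actions.summerwind.dev/v1alpha1
kind: Runner
metadata:
  name: example-runner
spec:
  # Set exactly one of: enterprise, organization, repository
  repository: myorg/myrepo   # placeholder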
@@ -175,22 +105,14 @@ func (rs *RunnerSpec) ValidateRepository() error {
// RunnerStatus defines the observed state of Runner
type RunnerStatus struct {
// +optional
Registration RunnerStatusRegistration `json:"registration"`
// +optional
Phase string `json:"phase,omitempty"`
// +optional
Reason string `json:"reason,omitempty"`
// +optional
Message string `json:"message,omitempty"`
// +optional
// +nullable
LastRegistrationCheckTime *metav1.Time `json:"lastRegistrationCheckTime,omitempty"`
Phase string `json:"phase"`
Reason string `json:"reason"`
Message string `json:"message"`
}
// RunnerStatusRegistration contains runner registration status
type RunnerStatusRegistration struct {
Enterprise string `json:"enterprise,omitempty"`
Organization string `json:"organization,omitempty"`
Repository string `json:"repository,omitempty"`
Labels []string `json:"labels,omitempty"`
@@ -200,12 +122,10 @@ type RunnerStatusRegistration struct {
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.enterprise",name=Enterprise,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.organization",name=Organization,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.repository",name=Repository,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.labels",name=Labels,type=string
// +kubebuilder:printcolumn:JSONPath=".status.phase",name=Status,type=string
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// Runner is the Schema for the runners API
type Runner struct {


@@ -21,7 +21,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
@@ -34,7 +34,7 @@ func (r *Runner) SetupWebhookWithManager(mgr ctrl.Manager) error {
Complete()
}
// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=mutate.runner.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=mutate.runner.actions.summerwind.dev
var _ webhook.Defaulter = &Runner{}
@@ -43,7 +43,7 @@ func (r *Runner) Default() {
// Nothing to do.
}
// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=validate.runner.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=validate.runner.actions.summerwind.dev
var _ webhook.Validator = &Runner{}


@@ -25,55 +25,30 @@ const (
AutoscalingMetricTypePercentageRunnersBusy = "PercentageRunnersBusy"
)
// RunnerDeploymentSpec defines the desired state of RunnerDeployment
// RunnerReplicaSetSpec defines the desired state of RunnerDeployment
type RunnerDeploymentSpec struct {
// +optional
// +nullable
Replicas *int `json:"replicas,omitempty"`
// +optional
// +nullable
Selector *metav1.LabelSelector `json:"selector"`
Template RunnerTemplate `json:"template"`
Template RunnerTemplate `json:"template"`
}
type RunnerDeploymentStatus struct {
// See K8s deployment controller code for reference
// https://github.com/kubernetes/kubernetes/blob/ea0764452222146c47ec826977f49d7001b0ea8c/pkg/controller/deployment/sync.go#L487-L505
AvailableReplicas int `json:"availableReplicas"`
ReadyReplicas int `json:"readyReplicas"`
// AvailableReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
// This corresponds to the sum of status.availableReplicas of all the runner replica sets.
// +optional
AvailableReplicas *int `json:"availableReplicas"`
// ReadyReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
// This corresponds to the sum of status.readyReplicas of all the runner replica sets.
// +optional
ReadyReplicas *int `json:"readyReplicas"`
// UpdatedReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
// This corresponds to status.replicas of the runner replica set that has the desired template hash.
// +optional
UpdatedReplicas *int `json:"updatedReplicas"`
// DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
// Replicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
// This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
// +optional
DesiredReplicas *int `json:"desiredReplicas"`
// Replicas is the total number of replicas
// +optional
Replicas *int `json:"replicas"`
Replicas *int `json:"desiredReplicas,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=rdeploy
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
// +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number
// +kubebuilder:printcolumn:JSONPath=".status.updatedReplicas",name=Up-To-Date,type=number
// +kubebuilder:printcolumn:JSONPath=".status.availableReplicas",name=Available,type=number
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:printcolumn:JSONPath=".status.availableReplicas",name=Current,type=number
// +kubebuilder:printcolumn:JSONPath=".status.readyReplicas",name=Ready,type=number
// RunnerDeployment is the Schema for the runnerdeployments API
type RunnerDeployment struct {


@@ -21,7 +21,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
@@ -34,7 +34,7 @@ func (r *RunnerDeployment) SetupWebhookWithManager(mgr ctrl.Manager) error {
Complete()
}
// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runnerdeployment,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerdeployments,versions=v1alpha1,name=mutate.runnerdeployment.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runnerdeployment,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerdeployments,versions=v1alpha1,name=mutate.runnerdeployment.actions.summerwind.dev
var _ webhook.Defaulter = &RunnerDeployment{}
@@ -43,7 +43,7 @@ func (r *RunnerDeployment) Default() {
// Nothing to do.
}
// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runnerdeployment,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerdeployments,versions=v1alpha1,name=validate.runnerdeployment.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runnerdeployment,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerdeployments,versions=v1alpha1,name=validate.runnerdeployment.actions.summerwind.dev
var _ webhook.Validator = &RunnerDeployment{}


@@ -26,26 +26,12 @@ type RunnerReplicaSetSpec struct {
// +nullable
Replicas *int `json:"replicas,omitempty"`
// +optional
// +nullable
Selector *metav1.LabelSelector `json:"selector"`
Template RunnerTemplate `json:"template"`
Template RunnerTemplate `json:"template"`
}
type RunnerReplicaSetStatus struct {
// See K8s replicaset controller code for reference
// https://github.com/kubernetes/kubernetes/blob/ea0764452222146c47ec826977f49d7001b0ea8c/pkg/controller/replicaset/replica_set_utils.go#L101-L106
// Replicas is the number of runners that are created and still being managed by this runner replica set.
// +optional
Replicas *int `json:"replicas"`
// ReadyReplicas is the number of runners that are created and Running.
ReadyReplicas *int `json:"readyReplicas"`
// AvailableReplicas is the number of runners that are created and Running.
// This is currently the same as ReadyReplicas but preserved for future use.
AvailableReplicas *int `json:"availableReplicas"`
AvailableReplicas int `json:"availableReplicas"`
ReadyReplicas int `json:"readyReplicas"`
}
type RunnerTemplate struct {
@@ -55,12 +41,10 @@ type RunnerTemplate struct {
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=rrs
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
// +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number
// +kubebuilder:printcolumn:JSONPath=".status.availableReplicas",name=Current,type=number
// +kubebuilder:printcolumn:JSONPath=".status.readyReplicas",name=Ready,type=number
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// RunnerReplicaSet is the Schema for the runnerreplicasets API
type RunnerReplicaSet struct {


@@ -21,7 +21,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
@@ -34,7 +34,7 @@ func (r *RunnerReplicaSet) SetupWebhookWithManager(mgr ctrl.Manager) error {
Complete()
}
// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=mutate.runnerreplicaset.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=mutate.runnerreplicaset.actions.summerwind.dev
var _ webhook.Defaulter = &RunnerReplicaSet{}
@@ -43,7 +43,7 @@ func (r *RunnerReplicaSet) Default() {
// Nothing to do.
}
// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=validate.runnerreplicaset.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=validate.runnerreplicaset.actions.summerwind.dev
var _ webhook.Validator = &RunnerReplicaSet{}


@@ -1,88 +0,0 @@
/*
Copyright 2021 The actions-runner-controller authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// RunnerSetSpec defines the desired state of RunnerSet
type RunnerSetSpec struct {
RunnerConfig `json:",inline"`
appsv1.StatefulSetSpec `json:",inline"`
}
type RunnerSetStatus struct {
// See K8s deployment controller code for reference
// https://github.com/kubernetes/kubernetes/blob/ea0764452222146c47ec826977f49d7001b0ea8c/pkg/controller/deployment/sync.go#L487-L505
// AvailableReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
// This corresponds to the sum of status.availableReplicas of all the runner replica sets.
// +optional
CurrentReplicas *int `json:"availableReplicas"`
// ReadyReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
// This corresponds to the sum of status.readyReplicas of all the runner replica sets.
// +optional
ReadyReplicas *int `json:"readyReplicas"`
// UpdatedReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
// This corresponds to status.replicas of the runner replica set that has the desired template hash.
// +optional
UpdatedReplicas *int `json:"updatedReplicas"`
// DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
// This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
// +optional
DesiredReplicas *int `json:"desiredReplicas"`
// Replicas is the total number of replicas
// +optional
Replicas *int `json:"replicas"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
// +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number
// +kubebuilder:printcolumn:JSONPath=".status.updatedReplicas",name=Up-To-Date,type=number
// +kubebuilder:printcolumn:JSONPath=".status.availableReplicas",name=Available,type=number
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// RunnerSet is the Schema for the runnersets API
type RunnerSet struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec RunnerSetSpec `json:"spec,omitempty"`
Status RunnerSetStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// RunnerSetList contains a list of RunnerSet
type RunnerSetList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []RunnerSet `json:"items"`
}
func init() {
SchemeBuilder.Register(&RunnerSet{}, &RunnerSetList{})
}


@@ -22,102 +22,9 @@ package v1alpha1
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CacheEntry) DeepCopyInto(out *CacheEntry) {
*out = *in
in.ExpirationTime.DeepCopyInto(&out.ExpirationTime)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheEntry.
func (in *CacheEntry) DeepCopy() *CacheEntry {
if in == nil {
return nil
}
out := new(CacheEntry)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CapacityReservation) DeepCopyInto(out *CapacityReservation) {
*out = *in
in.ExpirationTime.DeepCopyInto(&out.ExpirationTime)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservation.
func (in *CapacityReservation) DeepCopy() *CapacityReservation {
if in == nil {
return nil
}
out := new(CapacityReservation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CheckRunSpec) DeepCopyInto(out *CheckRunSpec) {
*out = *in
if in.Types != nil {
in, out := &in.Types, &out.Types
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Names != nil {
in, out := &in.Names, &out.Names
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Repositories != nil {
in, out := &in.Repositories, &out.Repositories
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckRunSpec.
func (in *CheckRunSpec) DeepCopy() *CheckRunSpec {
if in == nil {
return nil
}
out := new(CheckRunSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitHubEventScaleUpTriggerSpec) DeepCopyInto(out *GitHubEventScaleUpTriggerSpec) {
*out = *in
if in.CheckRun != nil {
in, out := &in.CheckRun, &out.CheckRun
*out = new(CheckRunSpec)
(*in).DeepCopyInto(*out)
}
if in.PullRequest != nil {
in, out := &in.PullRequest, &out.PullRequest
*out = new(PullRequestSpec)
(*in).DeepCopyInto(*out)
}
if in.Push != nil {
in, out := &in.Push, &out.Push
*out = new(PushSpec)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubEventScaleUpTriggerSpec.
func (in *GitHubEventScaleUpTriggerSpec) DeepCopy() *GitHubEventScaleUpTriggerSpec {
if in == nil {
return nil
}
out := new(GitHubEventScaleUpTriggerSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalRunnerAutoscaler) DeepCopyInto(out *HorizontalRunnerAutoscaler) {
*out = *in
@@ -203,27 +110,6 @@ func (in *HorizontalRunnerAutoscalerSpec) DeepCopyInto(out *HorizontalRunnerAuto
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ScaleUpTriggers != nil {
in, out := &in.ScaleUpTriggers, &out.ScaleUpTriggers
*out = make([]ScaleUpTrigger, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.CapacityReservations != nil {
in, out := &in.CapacityReservations, &out.CapacityReservations
*out = make([]CapacityReservation, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ScheduledOverrides != nil {
in, out := &in.ScheduledOverrides, &out.ScheduledOverrides
*out = make([]ScheduledOverride, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerSpec.
@@ -248,18 +134,6 @@ func (in *HorizontalRunnerAutoscalerStatus) DeepCopyInto(out *HorizontalRunnerAu
in, out := &in.LastSuccessfulScaleOutTime, &out.LastSuccessfulScaleOutTime
*out = (*in).DeepCopy()
}
if in.CacheEntries != nil {
in, out := &in.CacheEntries, &out.CacheEntries
*out = make([]CacheEntry, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ScheduledOverridesSummary != nil {
in, out := &in.ScheduledOverridesSummary, &out.ScheduledOverridesSummary
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerStatus.
@@ -292,62 +166,6 @@ func (in *MetricSpec) DeepCopy() *MetricSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PullRequestSpec) DeepCopyInto(out *PullRequestSpec) {
*out = *in
if in.Types != nil {
in, out := &in.Types, &out.Types
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Branches != nil {
in, out := &in.Branches, &out.Branches
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PullRequestSpec.
func (in *PullRequestSpec) DeepCopy() *PullRequestSpec {
if in == nil {
return nil
}
out := new(PullRequestSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PushSpec) DeepCopyInto(out *PushSpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PushSpec.
func (in *PushSpec) DeepCopy() *PushSpec {
if in == nil {
return nil
}
out := new(PushSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RecurrenceRule) DeepCopyInto(out *RecurrenceRule) {
*out = *in
in.UntilTime.DeepCopyInto(&out.UntilTime)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceRule.
func (in *RecurrenceRule) DeepCopy() *RecurrenceRule {
if in == nil {
return nil
}
out := new(RecurrenceRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Runner) DeepCopyInto(out *Runner) {
*out = *in
@@ -375,61 +193,6 @@ func (in *Runner) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerConfig) DeepCopyInto(out *RunnerConfig) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Ephemeral != nil {
in, out := &in.Ephemeral, &out.Ephemeral
*out = new(bool)
**out = **in
}
if in.DockerdWithinRunnerContainer != nil {
in, out := &in.DockerdWithinRunnerContainer, &out.DockerdWithinRunnerContainer
*out = new(bool)
**out = **in
}
if in.DockerEnabled != nil {
in, out := &in.DockerEnabled, &out.DockerEnabled
*out = new(bool)
**out = **in
}
if in.DockerMTU != nil {
in, out := &in.DockerMTU, &out.DockerMTU
*out = new(int64)
**out = **in
}
if in.DockerRegistryMirror != nil {
in, out := &in.DockerRegistryMirror, &out.DockerRegistryMirror
*out = new(string)
**out = **in
}
if in.VolumeSizeLimit != nil {
in, out := &in.VolumeSizeLimit, &out.VolumeSizeLimit
x := (*in).DeepCopy()
*out = &x
}
if in.VolumeStorageMedium != nil {
in, out := &in.VolumeStorageMedium, &out.VolumeStorageMedium
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerConfig.
func (in *RunnerConfig) DeepCopy() *RunnerConfig {
if in == nil {
return nil
}
out := new(RunnerConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerDeployment) DeepCopyInto(out *RunnerDeployment) {
*out = *in
@@ -497,11 +260,6 @@ func (in *RunnerDeploymentSpec) DeepCopyInto(out *RunnerDeploymentSpec) {
*out = new(int)
**out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
}
@@ -518,26 +276,6 @@ func (in *RunnerDeploymentSpec) DeepCopy() *RunnerDeploymentSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerDeploymentStatus) DeepCopyInto(out *RunnerDeploymentStatus) {
*out = *in
if in.AvailableReplicas != nil {
in, out := &in.AvailableReplicas, &out.AvailableReplicas
*out = new(int)
**out = **in
}
if in.ReadyReplicas != nil {
in, out := &in.ReadyReplicas, &out.ReadyReplicas
*out = new(int)
**out = **in
}
if in.UpdatedReplicas != nil {
in, out := &in.UpdatedReplicas, &out.UpdatedReplicas
*out = new(int)
**out = **in
}
if in.DesiredReplicas != nil {
in, out := &in.DesiredReplicas, &out.DesiredReplicas
*out = new(int)
**out = **in
}
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(int)
@@ -587,156 +325,13 @@ func (in *RunnerList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerPodSpec) DeepCopyInto(out *RunnerPodSpec) {
*out = *in
in.DockerdContainerResources.DeepCopyInto(&out.DockerdContainerResources)
if in.DockerVolumeMounts != nil {
in, out := &in.DockerVolumeMounts, &out.DockerVolumeMounts
*out = make([]v1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Containers != nil {
in, out := &in.Containers, &out.Containers
*out = make([]v1.Container, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]v1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.EnvFrom != nil {
in, out := &in.EnvFrom, &out.EnvFrom
*out = make([]v1.EnvFromSource, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.Resources.DeepCopyInto(&out.Resources)
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]v1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make([]v1.Volume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.EnableServiceLinks != nil {
in, out := &in.EnableServiceLinks, &out.EnableServiceLinks
*out = new(bool)
**out = **in
}
if in.InitContainers != nil {
in, out := &in.InitContainers, &out.InitContainers
*out = make([]v1.Container, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.AutomountServiceAccountToken != nil {
in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
*out = new(bool)
**out = **in
}
if in.SidecarContainers != nil {
in, out := &in.SidecarContainers, &out.SidecarContainers
*out = make([]v1.Container, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(v1.PodSecurityContext)
(*in).DeepCopyInto(*out)
}
if in.ImagePullSecrets != nil {
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
*out = make([]v1.LocalObjectReference, len(*in))
copy(*out, *in)
}
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
*out = new(v1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]v1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.TerminationGracePeriodSeconds != nil {
in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
*out = new(int64)
**out = **in
}
if in.EphemeralContainers != nil {
in, out := &in.EphemeralContainers, &out.EphemeralContainers
*out = make([]v1.EphemeralContainer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.HostAliases != nil {
in, out := &in.HostAliases, &out.HostAliases
*out = make([]v1.HostAlias, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.RuntimeClassName != nil {
in, out := &in.RuntimeClassName, &out.RuntimeClassName
*out = new(string)
**out = **in
}
if in.DnsConfig != nil {
in, out := &in.DnsConfig, &out.DnsConfig
*out = make([]v1.PodDNSConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerPodSpec.
func (in *RunnerPodSpec) DeepCopy() *RunnerPodSpec {
if in == nil {
return nil
}
out := new(RunnerPodSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerReplicaSet) DeepCopyInto(out *RunnerReplicaSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerReplicaSet.
@@ -797,11 +392,6 @@ func (in *RunnerReplicaSetSpec) DeepCopyInto(out *RunnerReplicaSetSpec) {
*out = new(int)
**out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
}
@@ -818,21 +408,6 @@ func (in *RunnerReplicaSetSpec) DeepCopy() *RunnerReplicaSetSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerReplicaSetStatus) DeepCopyInto(out *RunnerReplicaSetStatus) {
*out = *in
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(int)
**out = **in
}
if in.ReadyReplicas != nil {
in, out := &in.ReadyReplicas, &out.ReadyReplicas
*out = new(int)
**out = **in
}
if in.AvailableReplicas != nil {
in, out := &in.AvailableReplicas, &out.AvailableReplicas
*out = new(int)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerReplicaSetStatus.
@@ -846,126 +421,120 @@ func (in *RunnerReplicaSetStatus) DeepCopy() *RunnerReplicaSetStatus {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerSet) DeepCopyInto(out *RunnerSet) {
func (in *RunnerSpec) DeepCopyInto(out *RunnerSpec) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSet.
func (in *RunnerSet) DeepCopy() *RunnerSet {
if in == nil {
return nil
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make([]string, len(*in))
copy(*out, *in)
}
out := new(RunnerSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RunnerSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerSetList) DeepCopyInto(out *RunnerSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]RunnerSet, len(*in))
if in.Containers != nil {
in, out := &in.Containers, &out.Containers
*out = make([]v1.Container, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSetList.
func (in *RunnerSetList) DeepCopy() *RunnerSetList {
if in == nil {
return nil
in.DockerdContainerResources.DeepCopyInto(&out.DockerdContainerResources)
in.Resources.DeepCopyInto(&out.Resources)
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]v1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
out := new(RunnerSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RunnerSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
if in.EnvFrom != nil {
in, out := &in.EnvFrom, &out.EnvFrom
*out = make([]v1.EnvFromSource, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerSetSpec) DeepCopyInto(out *RunnerSetSpec) {
*out = *in
in.RunnerConfig.DeepCopyInto(&out.RunnerConfig)
in.StatefulSetSpec.DeepCopyInto(&out.StatefulSetSpec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSetSpec.
func (in *RunnerSetSpec) DeepCopy() *RunnerSetSpec {
if in == nil {
return nil
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]v1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
out := new(RunnerSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerSetStatus) DeepCopyInto(out *RunnerSetStatus) {
*out = *in
if in.CurrentReplicas != nil {
in, out := &in.CurrentReplicas, &out.CurrentReplicas
*out = new(int)
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make([]v1.Volume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.InitContainers != nil {
in, out := &in.InitContainers, &out.InitContainers
*out = make([]v1.Container, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.SidecarContainers != nil {
in, out := &in.SidecarContainers, &out.SidecarContainers
*out = make([]v1.Container, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.AutomountServiceAccountToken != nil {
in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
*out = new(bool)
**out = **in
}
if in.ReadyReplicas != nil {
in, out := &in.ReadyReplicas, &out.ReadyReplicas
*out = new(int)
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(v1.PodSecurityContext)
(*in).DeepCopyInto(*out)
}
if in.ImagePullSecrets != nil {
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
*out = make([]v1.LocalObjectReference, len(*in))
copy(*out, *in)
}
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
*out = new(v1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]v1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.EphemeralContainers != nil {
in, out := &in.EphemeralContainers, &out.EphemeralContainers
*out = make([]v1.EphemeralContainer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.TerminationGracePeriodSeconds != nil {
in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
*out = new(int64)
**out = **in
}
if in.UpdatedReplicas != nil {
in, out := &in.UpdatedReplicas, &out.UpdatedReplicas
*out = new(int)
if in.DockerdWithinRunnerContainer != nil {
in, out := &in.DockerdWithinRunnerContainer, &out.DockerdWithinRunnerContainer
*out = new(bool)
**out = **in
}
if in.DesiredReplicas != nil {
in, out := &in.DesiredReplicas, &out.DesiredReplicas
*out = new(int)
if in.DockerEnabled != nil {
in, out := &in.DockerEnabled, &out.DockerEnabled
*out = new(bool)
**out = **in
}
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(int)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSetStatus.
func (in *RunnerSetStatus) DeepCopy() *RunnerSetStatus {
if in == nil {
return nil
}
out := new(RunnerSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerSpec) DeepCopyInto(out *RunnerSpec) {
*out = *in
in.RunnerConfig.DeepCopyInto(&out.RunnerConfig)
in.RunnerPodSpec.DeepCopyInto(&out.RunnerPodSpec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSpec.
@@ -982,10 +551,6 @@ func (in *RunnerSpec) DeepCopy() *RunnerSpec {
func (in *RunnerStatus) DeepCopyInto(out *RunnerStatus) {
*out = *in
in.Registration.DeepCopyInto(&out.Registration)
if in.LastRegistrationCheckTime != nil {
in, out := &in.LastRegistrationCheckTime, &out.LastRegistrationCheckTime
*out = (*in).DeepCopy()
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerStatus.
@@ -1050,47 +615,3 @@ func (in *ScaleTargetRef) DeepCopy() *ScaleTargetRef {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleUpTrigger) DeepCopyInto(out *ScaleUpTrigger) {
*out = *in
if in.GitHubEvent != nil {
in, out := &in.GitHubEvent, &out.GitHubEvent
*out = new(GitHubEventScaleUpTriggerSpec)
(*in).DeepCopyInto(*out)
}
out.Duration = in.Duration
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleUpTrigger.
func (in *ScaleUpTrigger) DeepCopy() *ScaleUpTrigger {
if in == nil {
return nil
}
out := new(ScaleUpTrigger)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScheduledOverride) DeepCopyInto(out *ScheduledOverride) {
*out = *in
in.StartTime.DeepCopyInto(&out.StartTime)
in.EndTime.DeepCopyInto(&out.EndTime)
if in.MinReplicas != nil {
in, out := &in.MinReplicas, &out.MinReplicas
*out = new(int)
**out = **in
}
in.RecurrenceRule.DeepCopyInto(&out.RecurrenceRule)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledOverride.
func (in *ScheduledOverride) DeepCopy() *ScheduledOverride {
if in == nil {
return nil
}
out := new(ScheduledOverride)
in.DeepCopyInto(out)
return out
}

artifacthub-repo.yml Normal file

@@ -0,0 +1,4 @@
repositoryID: 6e120248-b034-45e5-b16c-6015ecfa7c6c
owners:
- name: mumoshu
email: ykuoka@gmail.com


@@ -1,4 +0,0 @@
# This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow
lint-conf: charts/.ci/lint-config.yaml
chart-repos:
- jetstack=https://charts.jetstack.io


@@ -1,6 +0,0 @@
rules:
# One blank line is OK
empty-lines:
max-start: 1
max-end: 1
max: 1


@@ -1,3 +0,0 @@
#!/bin/bash
docker run --rm -it -w /repo -v $(pwd):/repo quay.io/helmpack/chart-testing ct lint --all --config charts/.ci/ct-config.yaml


@@ -1,15 +0,0 @@
#!/bin/bash
for chart in `ls charts`;
do
helm template --values charts/$chart/ci/ci-values.yaml charts/$chart | kube-score score - \
--ignore-test pod-networkpolicy \
--ignore-test deployment-has-poddisruptionbudget \
--ignore-test deployment-has-host-podantiaffinity \
--ignore-test pod-probes \
--ignore-test container-image-tag \
--enable-optional-test container-security-context-privileged \
--enable-optional-test container-security-context-readonlyrootfilesystem \
--ignore-test container-security-context
done


@@ -21,5 +21,3 @@
.idea/
*.tmproj
.vscode/
# Docs
docs/


@@ -15,16 +15,9 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.12.8
version: 0.1.0
# Used as the default manager tag value when no tag property is provided in the values.yaml
appVersion: 0.19.0
home: https://github.com/actions-runner-controller/actions-runner-controller
sources:
- https://github.com/actions-runner-controller/actions-runner-controller
maintainers:
- name: actions-runner-controller
url: https://github.com/actions-runner-controller
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 0.11.2


@@ -1,86 +0,0 @@
## Docs
All additional docs are kept in the `docs/` folder; this README is solely for documenting the values.yaml keys and values
## Values
_The values are documented as of HEAD_
_Default values are the defaults set in the chart's values.yaml; some properties have default configurations in the code for when the property is omitted or invalid_
| Key | Description | Default |
|----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------|
| `labels` | Set labels to apply to all resources in the chart | |
| `replicaCount` | Set the number of controller pods | 1 |
| `syncPeriod` | Set the period in which the controller reconciles the desired runners count | 10m |
| `githubAPICacheDuration` | Set the cache period for API calls | |
| `githubEnterpriseServerURL` | Set the URL for a self-hosted GitHub Enterprise Server | |
| `logLevel` | Set the log level of the controller container | |
| `authSecret.create` | Deploy the controller auth secret | false |
| `authSecret.name` | Set the name of the auth secret | controller-manager |
| `authSecret.github_app_id` | The ID of your GitHub App. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_app_installation_id` | The ID of your GitHub App installation. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_app_private_key` | The multiline string of your GitHub App's private key. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_token` | Your chosen GitHub PAT token. **This can't be set at the same time as the `authSecret.github_app_*`** | |
| `dockerRegistryMirror` | The default Docker Registry Mirror used by runners | |
| `image.repository` | The "repository/image" of the controller container | summerwind/actions-runner-controller |
| `image.tag` | The tag of the controller container | |
| `image.actionsRunnerRepositoryAndTag` | The "repository/image" of the actions runner container | summerwind/actions-runner:latest |
| `image.dindSidecarRepositoryAndTag` | The "repository/image" of the dind sidecar container | docker:dind |
| `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
| `metrics.serviceMonitor` | Deploy serviceMonitor kind for use with prometheus-operator CRDs | false |
| `metrics.port` | Set port of metrics service | 8443 |
| `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
| `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
| `metrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
| `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | |
| `fullNameOverride` | Override the full resource names | |
| `nameOverride` | Override the resource name prefix | |
| `serviceAccount.annotations` | Set annotations for the service account | |
| `serviceAccount.create` | Deploy the controller pod under a service account | true |
| `podAnnotations` | Set annotations for the controller pod | |
| `podLabels` | Set labels for the controller pod | |
| `serviceAccount.name` | Set the name of the service account | |
| `securityContext` | Set the security context for each container in the controller pod | |
| `podSecurityContext` | Set the security context for the controller pod | |
| `service.port` | Set controller service port | |
| `service.type` | Set controller service type | |
| `topologySpreadConstraints` | Set the controller pod topologySpreadConstraints | |
| `nodeSelector` | Set the controller pod nodeSelector | |
| `resources` | Set the controller pod resources | |
| `affinity` | Set the controller pod affinity rules | |
| `tolerations` | Set the controller pod tolerations | |
| `env` | Set environment variables for the controller container | |
| `priorityClassName` | Set the controller pod priorityClassName | |
| `scope.watchNamespace` | Tells the controller which namespace to watch if `scope.singleNamespace` is true | |
| `scope.singleNamespace` | Limit the controller to watch a single namespace | false |
| `githubWebhookServer.logLevel` | Set the log level of the githubWebhookServer container | |
| `githubWebhookServer.replicaCount` | Set the number of webhook server pods | 1 |
| `githubWebhookServer.syncPeriod` | Set the period in which the controller reconciles the resources | 10m |
| `githubWebhookServer.enabled` | Deploy the webhook server pod | false |
| `githubWebhookServer.secret.create` | Deploy the webhook hook secret | false |
| `githubWebhookServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
| `githubWebhookServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
| `githubWebhookServer.imagePullSecrets` | Specifies the secret to be used when pulling the githubWebhookServer pod containers | |
| `githubWebhookServer.nameOverride` | Override the resource name prefix | |
| `githubWebhookServer.fullnameOverride` | Override the full resource names | |
| `githubWebhookServer.serviceAccount.create` | Deploy the githubWebhookServer under a service account | true |
| `githubWebhookServer.serviceAccount.annotations` | Set annotations for the service account | |
| `githubWebhookServer.serviceAccount.name` | Set the service account name | |
| `githubWebhookServer.podAnnotations` | Set annotations for the githubWebhookServer pod | |
| `githubWebhookServer.podLabels` | Set labels for the githubWebhookServer pod | |
| `githubWebhookServer.podSecurityContext` | Set the security context for the githubWebhookServer pod | |
| `githubWebhookServer.securityContext` | Set the security context for each container in the githubWebhookServer pod | |
| `githubWebhookServer.resources` | Set the githubWebhookServer pod resources | |
| `githubWebhookServer.topologySpreadConstraints` | Set the githubWebhookServer pod topologySpreadConstraints | |
| `githubWebhookServer.nodeSelector` | Set the githubWebhookServer pod nodeSelector | |
| `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
| `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
| `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
| `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
| `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort":"http", "protocol":"TCP", "name":"http"}]` |
| `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
| `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
| `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
| `githubWebhookServer.ingress.tls` | Set tls configuration for ingress | |
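
To tie several of these keys together, here is a minimal, hypothetical values override that creates the auth secret from GitHub App credentials (the `authSecret.github_app_*` keys, which cannot be combined with `authSecret.github_token`). All IDs and the key below are placeholders, not real defaults:
```yaml
# values.override.yaml -- illustrative only, not shipped with the chart
authSecret:
  create: true
  github_app_id: "12345"               # placeholder App ID
  github_app_installation_id: "678910" # placeholder installation ID
  github_app_private_key: |
    -----BEGIN RSA PRIVATE KEY-----
    ...placeholder PEM body; paste your GitHub App's private key here...
    -----END RSA PRIVATE KEY-----
logLevel: debug
```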

View File

@@ -1,30 +0,0 @@
# This file sets some opinionated values for kube-score to use
# when parsing the chart
image:
pullPolicy: Always
podSecurityContext:
fsGroup: 2000
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 2000
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
authSecret:
create: false
# Set the following to true to create a dummy secret, allowing the manager pod to start
# This is only useful in CI
createDummySecret: true

View File

@@ -1,304 +1,133 @@
---
apiVersion: apiextensions.k8s.io/v1
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.6.0
controller-gen.kubebuilder.io/version: v0.3.0
creationTimestamp: null
name: horizontalrunnerautoscalers.actions.summerwind.dev
spec:
additionalPrinterColumns:
- JSONPath: .spec.minReplicas
name: Min
type: number
- JSONPath: .spec.maxReplicas
name: Max
type: number
- JSONPath: .status.desiredReplicas
name: Desired
type: number
group: actions.summerwind.dev
names:
kind: HorizontalRunnerAutoscaler
listKind: HorizontalRunnerAutoscalerList
plural: horizontalrunnerautoscalers
shortNames:
- hra
singular: horizontalrunnerautoscaler
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.minReplicas
name: Min
type: number
- jsonPath: .spec.maxReplicas
name: Max
type: number
- jsonPath: .status.desiredReplicas
name: Desired
type: number
- jsonPath: .status.scheduledOverridesSummary
name: Schedule
type: string
name: v1alpha1
schema:
openAPIV3Schema:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: HorizontalRunnerAutoscalerSpec defines the desired state
of HorizontalRunnerAutoscaler
properties:
capacityReservations:
items:
description: CapacityReservation specifies the number of replicas
temporarily added to the scale target until ExpirationTime.
properties:
expirationTime:
format: date-time
type: string
name:
type: string
replicas:
type: integer
type: object
type: array
maxReplicas:
description: MaxReplicas is the maximum number of replicas the deployment
is allowed to scale
type: integer
metrics:
description: Metrics is the collection of various metric targets to
calculate desired number of runners
items:
properties:
repositoryNames:
description: RepositoryNames is the list of repository names
to be used for calculating the metric. For example, a repository
name is the REPO part of `github.com/USER/REPO`.
items:
type: string
type: array
scaleDownAdjustment:
description: ScaleDownAdjustment is the number of runners removed
on scale-down. You can only specify either ScaleDownFactor
or ScaleDownAdjustment.
type: integer
scaleDownFactor:
description: ScaleDownFactor is the multiplicative factor applied
to the current number of runners used to determine how many
pods should be removed.
type: string
scaleDownThreshold:
description: ScaleDownThreshold is the percentage of busy runners
less than which will trigger the hpa to scale the runners
down.
type: string
scaleUpAdjustment:
description: ScaleUpAdjustment is the number of runners added
on scale-up. You can only specify either ScaleUpFactor or
ScaleUpAdjustment.
type: integer
scaleUpFactor:
description: ScaleUpFactor is the multiplicative factor applied
to the current number of runners used to determine how many
pods should be added.
type: string
scaleUpThreshold:
description: ScaleUpThreshold is the percentage of busy runners
greater than which will trigger the hpa to scale runners up.
type: string
type:
description: Type is the type of metric to be used for autoscaling.
The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
type: string
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment
is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: ScaleDownDelaySecondsAfterScaleOut is the approximate
delay for a scale down following a scale up, used to prevent flapping
(down->up->down->... loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to the scaled resource like
RunnerDeployment
subresources:
status: {}
validation:
openAPIV3Schema:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: HorizontalRunnerAutoscalerSpec defines the desired state of
HorizontalRunnerAutoscaler
properties:
maxReplicas:
description: MaxReplicas is the maximum number of replicas the deployment
is allowed to scale
type: integer
metrics:
description: Metrics is the collection of various metric targets to
calculate desired number of runners
items:
properties:
kind:
description: Kind is the type of resource being referenced
enum:
- RunnerDeployment
- RunnerSet
repositoryNames:
description: RepositoryNames is the list of repository names to
be used for calculating the metric. For example, a repository
name is the REPO part of `github.com/USER/REPO`.
items:
type: string
type: array
scaleDownFactor:
description: ScaleDownFactor is the multiplicative factor applied
to the current number of runners used to determine how many
pods should be removed.
type: string
name:
description: Name is the name of resource being referenced
scaleDownThreshold:
description: ScaleDownThreshold is the percentage of busy runners
less than which will trigger the hpa to scale the runners down.
type: string
scaleUpFactor:
description: ScaleUpFactor is the multiplicative factor applied
to the current number of runners used to determine how many
pods should be added.
type: string
scaleUpThreshold:
description: ScaleUpThreshold is the percentage of busy runners
greater than which will trigger the hpa to scale runners up.
type: string
type:
description: Type is the type of metric to be used for autoscaling.
The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
type: string
type: object
scaleUpTriggers:
description: "ScaleUpTriggers is an experimental feature to increase
the desired replicas by 1 on each webhook request received by
the webhookBasedAutoscaler. \n This feature requires you to also
enable and deploy the webhookBasedAutoscaler onto your cluster.
\n Note that the added runners remain until the next sync period
at least, and they may or may not be used by GitHub Actions depending
on the timing. They are intended to be used to gain \"resource slack\"
immediately after you receive a webhook from GitHub, so that you
can loosely expect MinReplicas runners to be always available."
items:
properties:
amount:
type: integer
duration:
type: string
githubEvent:
properties:
checkRun:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
properties:
names:
description: Names is a list of GitHub Actions glob
patterns. Any check_run event whose name matches one
of patterns in the list can trigger autoscaling. Note
that the check_run name seems to equal the job name
you've defined in your actions workflow yaml file,
so it is very likely that you can use this to
trigger autoscaling depending on the job.
items:
type: string
type: array
repositories:
description: Repositories is a list of GitHub repositories.
Any check_run event whose repository matches one of
repositories in the list can trigger autoscaling.
items:
type: string
type: array
status:
type: string
types:
items:
type: string
type: array
type: object
pullRequest:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
properties:
branches:
items:
type: string
type: array
types:
items:
type: string
type: array
type: object
push:
description: PushSpec is the condition for triggering scale-up
on push events. Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type: object
type: object
type: object
type: array
scheduledOverrides:
description: ScheduledOverrides is the list of ScheduledOverride.
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec
on schedule. The earlier a scheduled override is, the higher it
is prioritized.
items:
description: ScheduledOverride can be used to override a few fields
of HorizontalRunnerAutoscalerSpec on schedule. A schedule can
optionally be recurring, so that the corresponding override happens
every day, week, month, or year.
properties:
endTime:
description: EndTime is the time at which the first override
ends.
format: date-time
type: string
minReplicas:
description: MinReplicas is the number of runners while overriding.
If omitted, it doesn't override minReplicas.
minimum: 0
nullable: true
type: integer
recurrenceRule:
properties:
frequency:
description: Frequency is the name of a predefined interval
of each recurrence. The valid values are "Daily", "Weekly",
"Monthly", and "Yearly". If empty, the corresponding override
happens only once.
enum:
- Daily
- Weekly
- Monthly
- Yearly
type: string
untilTime:
description: UntilTime is the time of the final recurrence.
If empty, the schedule recurs forever.
format: date-time
type: string
type: object
startTime:
description: StartTime is the time at which the first override
starts.
format: date-time
type: string
required:
- endTime
- startTime
type: object
type: array
type: object
status:
properties:
cacheEntries:
items:
properties:
expirationTime:
format: date-time
type: string
key:
type: string
value:
type: integer
type: object
type: array
desiredReplicas:
description: DesiredReplicas is the total number of desired, non-terminated
and latest pods to be set for the primary RunnerSet. This doesn't
include outdated pods while upgrading the deployment and replacing
the runnerset.
type: integer
lastSuccessfulScaleOutTime:
format: date-time
nullable: true
type: string
observedGeneration:
description: ObservedGeneration is the most recent generation observed
for the target. It corresponds to e.g. RunnerDeployment's generation,
which is updated on mutation by the API Server.
format: int64
type: integer
scheduledOverridesSummary:
description: ScheduledOverridesSummary is the summary of active and
upcoming scheduled overrides to be shown in e.g. a column of a `kubectl
get hra` output for observability.
type: string
type: object
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment
is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: ScaleDownDelaySecondsAfterScaleOut is the approximate delay
for a scale down following a scale up, used to prevent flapping (down->up->down->...
loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to the scaled resource like
RunnerDeployment
properties:
name:
type: string
type: object
type: object
status:
properties:
desiredReplicas:
description: DesiredReplicas is the total number of desired, non-terminated
and latest pods to be set for the primary RunnerSet. This doesn't include
outdated pods while upgrading the deployment and replacing the runnerset.
type: integer
lastSuccessfulScaleOutTime:
format: date-time
type: string
observedGeneration:
description: ObservedGeneration is the most recent generation observed
for the target. It corresponds to e.g. RunnerDeployment's generation,
which is updated on mutation by the API Server.
format: int64
type: integer
type: object
type: object
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""

View File

@@ -1,40 +0,0 @@
## Upgrading
This project makes extensive use of CRDs to provide much of its functionality. Helm unfortunately does not support [managing](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/) CRDs by design:
_The full breakdown as to how they came to this decision and why they have taken the approach they have for dealing with CRDs can be found in [Helm Improvement Proposal 11](https://github.com/helm/community/blob/main/hips/hip-0011.md)_
```
There is no support at this time for upgrading or deleting CRDs using Helm. This was an explicit decision after much
community discussion due to the danger for unintentional data loss. Furthermore, there is currently no community
consensus around how to handle CRDs and their lifecycle. As this evolves, Helm will add support for those use cases.
```
Helm will do an initial install of CRDs but it will not touch them afterwards (update or delete).
Additionally, because the project leverages CRDs so extensively, you **MUST** run the matching controller app container with its matching CRDs, i.e. always redeploy your CRDs if you are changing the app version.
Due to the above you can't just do a `helm upgrade` to release the latest version of the chart; the best-practice steps are recorded below:
## Steps
1. Upgrade CRDs
```shell
# REMEMBER TO UPDATE CHART_VERSION TO THE RELEVANT CHART VERSION!!!!
CHART_VERSION=0.11.0
curl -L https://github.com/actions-runner-controller/actions-runner-controller/releases/download/actions-runner-controller-${CHART_VERSION}/actions-runner-controller-${CHART_VERSION}.tgz | tar zxv --strip 1 actions-runner-controller/crds
kubectl apply -f crds/
```
2. Upgrade the Helm release
```shell
helm upgrade --install \
--namespace actions-runner-system \
--version ${CHART_VERSION} \
actions-runner-controller \
actions-runner-controller/actions-runner-controller
```

View File

@@ -1,8 +1,8 @@
1. Get the application URL by running these commands:
{{- if .Values.githubWebhookServer.ingress.enabled }}
{{- range $host := .Values.githubWebhookServer.ingress.hosts }}
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.githubWebhookServer.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}

View File

@@ -1,60 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "actions-runner-controller-github-webhook-server.name" -}}
{{- default .Chart.Name .Values.githubWebhookServer.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- define "actions-runner-controller-github-webhook-server.instance" -}}
{{- printf "%s-%s" .Release.Name "github-webhook-server" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "actions-runner-controller-github-webhook-server.fullname" -}}
{{- if .Values.githubWebhookServer.fullnameOverride }}
{{- .Values.githubWebhookServer.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.githubWebhookServer.nameOverride }}
{{- $instance := include "actions-runner-controller-github-webhook-server.instance" . }}
{{- if contains $name $instance }}
{{- $instance | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s-%s" .Release.Name $name "github-webhook-server" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "actions-runner-controller-github-webhook-server.selectorLabels" -}}
app.kubernetes.io/name: {{ include "actions-runner-controller-github-webhook-server.name" . }}
app.kubernetes.io/instance: {{ include "actions-runner-controller-github-webhook-server.instance" . }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "actions-runner-controller-github-webhook-server.serviceAccountName" -}}
{{- if .Values.githubWebhookServer.serviceAccount.create }}
{{- default (include "actions-runner-controller-github-webhook-server.fullname" .) .Values.githubWebhookServer.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.githubWebhookServer.serviceAccount.name }}
{{- end }}
{{- end }}
{{- define "actions-runner-controller-github-webhook-server.secretName" -}}
{{- default (include "actions-runner-controller-github-webhook-server.fullname" .) .Values.githubWebhookServer.secret.name }}
{{- end }}
{{- define "actions-runner-controller-github-webhook-server.roleName" -}}
{{- include "actions-runner-controller-github-webhook-server.fullname" . }}
{{- end }}
{{- define "actions-runner-controller-github-webhook-server.serviceMonitorName" -}}
{{- include "actions-runner-controller-github-webhook-server.fullname" . | trunc 47 }}-service-monitor
{{- end }}

View File

@@ -64,10 +64,6 @@ Create the name of the service account to use
{{- end }}
{{- end }}
{{- define "actions-runner-controller.secretName" -}}
{{- default (include "actions-runner-controller.fullname" .) .Values.authSecret.name -}}
{{- end }}
{{- define "actions-runner-controller.leaderElectionRoleName" -}}
{{- include "actions-runner-controller.fullname" . }}-leader-election
{{- end }}
@@ -89,15 +85,11 @@ Create the name of the service account to use
{{- end }}
{{- define "actions-runner-controller.webhookServiceName" -}}
{{- include "actions-runner-controller.fullname" . | trunc 55 }}-webhook
{{- include "actions-runner-controller.fullname" . }}-webhook
{{- end }}
{{- define "actions-runner-controller.metricsServiceName" -}}
{{- include "actions-runner-controller.fullname" . | trunc 47 }}-metrics-service
{{- end }}
{{- define "actions-runner-controller.serviceMonitorName" -}}
{{- include "actions-runner-controller.fullname" . | trunc 47 }}-service-monitor
{{- define "actions-runner-controller.authProxyServiceName" -}}
{{- include "actions-runner-controller.fullname" . }}-controller-manager-metrics-service
{{- end }}
{{- define "actions-runner-controller.selfsignedIssuerName" -}}

View File

@@ -1,4 +1,3 @@
{{- if .Values.metrics.proxy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
@@ -12,4 +11,3 @@ rules:
resources:
- subjectaccessreviews
verbs: ["create"]
{{- end }}

View File

@@ -1,4 +1,3 @@
{{- if .Values.metrics.proxy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
@@ -11,4 +10,3 @@ subjects:
- kind: ServiceAccount
name: {{ include "actions-runner-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -3,12 +3,12 @@ kind: Service
metadata:
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
name: {{ include "actions-runner-controller.metricsServiceName" . }}
name: {{ include "actions-runner-controller.authProxyServiceName" . }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- name: metrics-port
port: {{ .Values.metrics.port }}
targetPort: metrics-port
- name: https
port: 8443
targetPort: https
selector:
{{- include "actions-runner-controller.selectorLabels" . | nindent 4 }}

View File

@@ -5,7 +5,7 @@ apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ include "actions-runner-controller.selfsignedIssuerName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ .Namespace }}
spec:
selfSigned: {}
---
@@ -13,7 +13,7 @@ apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ include "actions-runner-controller.servingCertName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ .Namespace }}
spec:
dnsNames:
- {{ include "actions-runner-controller.webhookServiceName" . }}.{{ .Release.Namespace }}.svc

View File

@@ -1,10 +0,0 @@
# This template only exists to facilitate CI testing of the chart, since
# a secret is expected to be found in the namespace by the controller manager
{{ if .Values.createDummySecret -}}
apiVersion: v1
data:
github_token: dGVzdA==
kind: Secret
metadata:
name: controller-manager
{{- end }}

View File

@@ -1,24 +0,0 @@
{{- if .Values.metrics.serviceMonitor }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
{{- with .Values.metrics.serviceMonitorLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "actions-runner-controller.serviceMonitorName" . }}
spec:
endpoints:
- path: /metrics
port: metrics-port
{{- if .Values.metrics.proxy.enabled }}
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
scheme: https
tlsConfig:
insecureSkipVerify: true
{{- end }}
selector:
matchLabels:
{{- include "actions-runner-controller.selectorLabels" . | nindent 6 }}
{{- end }}
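
Since the ServiceMonitor above is only rendered when `metrics.serviceMonitor` is true, a Prometheus-operator user would opt in with values along these lines (the `release: prometheus` label is a placeholder and must match your Prometheus `serviceMonitorSelector`):
```yaml
metrics:
  serviceMonitor: true
  serviceMonitorLabels:
    release: prometheus   # placeholder; match your Prometheus serviceMonitorSelector
  port: 8443
  proxy:
    enabled: true         # scrape through kube-rbac-proxy over https
```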

View File

@@ -6,7 +6,6 @@ metadata:
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "actions-runner-controller.selectorLabels" . | nindent 6 }}
@@ -18,9 +17,6 @@ spec:
{{- end }}
labels:
{{- include "actions-runner-controller.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
@@ -34,56 +30,33 @@ spec:
{{- end }}
containers:
- args:
{{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
{{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
- "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
- "--metrics-addr=127.0.0.1:8080"
- "--enable-leader-election"
- "--sync-period={{ .Values.syncPeriod }}"
- "--docker-image={{ .Values.image.dindSidecarRepositoryAndTag }}"
- "--runner-image={{ .Values.image.actionsRunnerRepositoryAndTag }}"
{{- if .Values.dockerRegistryMirror }}
- "--docker-registry-mirror={{ .Values.dockerRegistryMirror }}"
{{- end }}
{{- if .Values.scope.singleNamespace }}
- "--watch-namespace={{ default .Release.Namespace .Values.scope.watchNamespace }}"
{{- end }}
{{- if .Values.githubAPICacheDuration }}
- "--github-api-cache-duration={{ .Values.githubAPICacheDuration }}"
{{- end }}
{{- if .Values.logLevel }}
- "--log-level={{ .Values.logLevel }}"
{{- end }}
command:
- "/manager"
env:
{{- if .Values.githubEnterpriseServerURL }}
- name: GITHUB_ENTERPRISE_URL
value: {{ .Values.githubEnterpriseServerURL }}
{{- end }}
- name: GITHUB_TOKEN
valueFrom:
secretKeyRef:
key: github_token
name: {{ include "actions-runner-controller.secretName" . }}
name: controller-manager
optional: true
- name: GITHUB_APP_ID
valueFrom:
secretKeyRef:
key: github_app_id
name: {{ include "actions-runner-controller.secretName" . }}
name: controller-manager
optional: true
- name: GITHUB_APP_INSTALLATION_ID
valueFrom:
secretKeyRef:
key: github_app_installation_id
name: {{ include "actions-runner-controller.secretName" . }}
name: controller-manager
optional: true
- name: GITHUB_APP_PRIVATE_KEY
value: /etc/actions-runner-controller/github_app_private_key
{{- range $key, $val := .Values.env }}
- name: {{ $key }}
value: {{ $val | quote }}
{{- end }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
name: manager
imagePullPolicy: {{ .Values.image.pullPolicy }}
@@ -91,52 +64,34 @@ spec:
- containerPort: 9443
name: webhook-server
protocol: TCP
{{- if not .Values.metrics.proxy.enabled }}
- containerPort: {{ .Values.metrics.port }}
name: metrics-port
protocol: TCP
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
volumeMounts:
- mountPath: "/etc/actions-runner-controller"
name: secret
name: controller-manager
readOnly: true
- mountPath: /tmp
name: tmp
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
{{- if .Values.metrics.proxy.enabled }}
- args:
- "--secure-listen-address=0.0.0.0:{{ .Values.metrics.port }}"
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8080/"
- "--logtostderr=true"
- "--v=10"
image: "{{ .Values.metrics.proxy.image.repository }}:{{ .Values.metrics.proxy.image.tag }}"
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1
name: kube-rbac-proxy
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- containerPort: {{ .Values.metrics.port }}
name: metrics-port
resources:
{{- toYaml .Values.resources | nindent 12 }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}
- containerPort: 8443
name: https
terminationGracePeriodSeconds: 10
volumes:
- name: secret
- name: controller-manager
secret:
secretName: {{ include "actions-runner-controller.secretName" . }}
secretName: controller-manager
- name: cert
secret:
defaultMode: 420
secretName: webhook-server-cert
- name: tmp
emptyDir: {}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
@@ -149,7 +104,3 @@ spec:
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -1,108 +0,0 @@
{{- if .Values.githubWebhookServer.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "actions-runner-controller-github-webhook-server.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.githubWebhookServer.replicaCount }}
selector:
matchLabels:
{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.githubWebhookServer.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 8 }}
{{- with .Values.githubWebhookServer.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.githubWebhookServer.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "actions-runner-controller-github-webhook-server.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.githubWebhookServer.podSecurityContext | nindent 8 }}
{{- with .Values.githubWebhookServer.priorityClassName }}
priorityClassName: "{{ . }}"
{{- end }}
containers:
- args:
{{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
{{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
- "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
- "--sync-period={{ .Values.githubWebhookServer.syncPeriod }}"
{{- if .Values.githubWebhookServer.logLevel }}
- "--log-level={{ .Values.githubWebhookServer.logLevel }}"
{{- end }}
command:
- "/github-webhook-server"
env:
- name: GITHUB_WEBHOOK_SECRET_TOKEN
valueFrom:
secretKeyRef:
key: github_webhook_secret_token
name: {{ include "actions-runner-controller-github-webhook-server.secretName" . }}
optional: true
{{- range $key, $val := .Values.githubWebhookServer.env }}
- name: {{ $key }}
value: {{ $val | quote }}
{{- end }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
name: github-webhook-server
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- containerPort: 8000
name: http
protocol: TCP
{{- if not .Values.metrics.proxy.enabled }}
- containerPort: {{ .Values.metrics.port }}
name: metrics-port
protocol: TCP
{{- end }}
resources:
{{- toYaml .Values.githubWebhookServer.resources | nindent 12 }}
securityContext:
{{- toYaml .Values.githubWebhookServer.securityContext | nindent 12 }}
{{- if .Values.metrics.proxy.enabled }}
- args:
- "--secure-listen-address=0.0.0.0:{{ .Values.metrics.port }}"
- "--upstream=http://127.0.0.1:8080/"
- "--logtostderr=true"
- "--v=10"
image: "{{ .Values.metrics.proxy.image.repository }}:{{ .Values.metrics.proxy.image.tag }}"
name: kube-rbac-proxy
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- containerPort: {{ .Values.metrics.port }}
name: metrics-port
resources:
{{- toYaml .Values.resources | nindent 12 }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}
terminationGracePeriodSeconds: 10
{{- with .Values.githubWebhookServer.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.githubWebhookServer.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.githubWebhookServer.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.githubWebhookServer.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
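
This webhook server deployment is the `webhookBasedAutoscaler` that the HorizontalRunnerAutoscaler CRD's `scaleUpTriggers` description refers to: each matching webhook delivery adds transient capacity to the scale target. A hypothetical trigger that adds one runner for five minutes on queued `check_run` events might look like:
```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-webhook-hra      # illustrative name
spec:
  scaleTargetRef:
    name: example-runnerdeploy   # illustrative RunnerDeployment
  minReplicas: 1
  maxReplicas: 10
  scaleUpTriggers:
  - githubEvent:
      checkRun:
        types: ["created"]
        status: "queued"
    amount: 1      # add one runner per matching delivery
    duration: "5m" # reclaim it after five minutes
```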

View File

@@ -1,41 +0,0 @@
{{- if .Values.githubWebhookServer.ingress.enabled -}}
{{- $fullName := include "actions-runner-controller-github-webhook-server.fullname" . -}}
{{- $svcPort := (index .Values.githubWebhookServer.service.ports 0).port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
{{- with .Values.githubWebhookServer.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.githubWebhookServer.ingress.tls }}
tls:
{{- range .Values.githubWebhookServer.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.githubWebhookServer.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,90 +0,0 @@
{{- if .Values.githubWebhookServer.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: {{ include "actions-runner-controller-github-webhook-server.roleName" . }}
rules:
- apiGroups:
- actions.summerwind.dev
resources:
- horizontalrunnerautoscalers
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- horizontalrunnerautoscalers/finalizers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- horizontalrunnerautoscalers/status
verbs:
- get
- patch
- update
- apiGroups:
- actions.summerwind.dev
resources:
- runnersets
verbs:
- get
- list
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- runnerdeployments
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- runnerdeployments/finalizers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- runnerdeployments/status
verbs:
- get
- patch
- update
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
{{- end }}

View File

@@ -1,14 +0,0 @@
{{- if .Values.githubWebhookServer.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "actions-runner-controller-github-webhook-server.roleName" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "actions-runner-controller-github-webhook-server.roleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "actions-runner-controller-github-webhook-server.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -1,16 +0,0 @@
{{- if .Values.githubWebhookServer.enabled }}
{{- if .Values.githubWebhookServer.secret.create }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "actions-runner-controller-github-webhook-server.secretName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
type: Opaque
data:
{{- if .Values.githubWebhookServer.secret.github_webhook_secret_token }}
github_webhook_secret_token: {{ .Values.githubWebhookServer.secret.github_webhook_secret_token | toString | b64enc }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,26 +0,0 @@
{{- if .Values.githubWebhookServer.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "actions-runner-controller-github-webhook-server.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
{{- if .Values.githubWebhookServer.service.annotations }}
annotations:
{{ toYaml .Values.githubWebhookServer.service.annotations | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.githubWebhookServer.service.type }}
ports:
{{ range $_, $port := .Values.githubWebhookServer.service.ports -}}
- {{ $port | toYaml | nindent 6 }}
{{- end }}
{{- if .Values.metrics.serviceMonitor }}
- name: metrics-port
port: {{ .Values.metrics.port }}
targetPort: metrics-port
{{- end }}
selector:
{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 4 }}
{{- end }}

View File

@@ -1,24 +0,0 @@
{{- if and .Values.githubWebhookServer.enabled .Values.metrics.serviceMonitor }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
{{- with .Values.metrics.serviceMonitorLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "actions-runner-controller-github-webhook-server.serviceMonitorName" . }}
spec:
endpoints:
- path: /metrics
port: metrics-port
{{- if .Values.metrics.proxy.enabled }}
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
scheme: https
tlsConfig:
insecureSkipVerify: true
{{- end }}
selector:
matchLabels:
{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@@ -1,15 +0,0 @@
{{- if .Values.githubWebhookServer.enabled -}}
{{- if .Values.githubWebhookServer.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "actions-runner-controller-github-webhook-server.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
{{- with .Values.githubWebhookServer.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -132,62 +132,6 @@ rules:
- get
- patch
- update
- apiGroups:
- actions.summerwind.dev
resources:
- runnersets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- runnersets/finalizers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- runnersets/status
verbs:
- get
- patch
- update
- apiGroups:
- "apps"
resources:
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- "apps"
resources:
- statefulsets/finalizers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
@@ -195,15 +139,6 @@ rules:
verbs:
- create
- patch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- list
- update
- apiGroups:
- ""
resources:

View File

@@ -1,23 +1,14 @@
{{- if .Values.authSecret.create }}
{{- if or .Values.authSecret.enabled }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "actions-runner-controller.secretName" . }}
name: controller-manager
namespace: {{ .Release.Namespace }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
type: Opaque
data:
{{- if .Values.authSecret.github_app_id }}
github_app_id: {{ .Values.authSecret.github_app_id | toString | b64enc }}
{{- end }}
{{- if .Values.authSecret.github_app_installation_id }}
github_app_installation_id: {{ .Values.authSecret.github_app_installation_id | toString | b64enc }}
{{- end }}
{{- if .Values.authSecret.github_app_private_key }}
github_app_private_key: {{ .Values.authSecret.github_app_private_key | toString | b64enc }}
{{- end }}
{{- if .Values.authSecret.github_token }}
github_token: {{ .Values.authSecret.github_token | toString | b64enc }}
{{- end }}
{{- end }}
{{- range $k, $v := .Values.authSecret }}
{{ $k }}: {{ $v | toString | b64enc }}
{{- end }}
{{- end }}

View File

@@ -1,6 +1,6 @@
---
apiVersion: admissionregistration.k8s.io/v1
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
creationTimestamp: null
@@ -8,9 +8,8 @@ metadata:
annotations:
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "actions-runner-controller.servingCertName" . }}
webhooks:
- admissionReviewVersions:
- v1beta1
clientConfig:
- clientConfig:
caBundle: Cg==
service:
name: {{ include "actions-runner-controller.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
@@ -27,10 +26,8 @@ webhooks:
- UPDATE
resources:
- runners
sideEffects: None
- admissionReviewVersions:
- v1beta1
clientConfig:
- clientConfig:
caBundle: Cg==
service:
name: {{ include "actions-runner-controller.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
@@ -47,10 +44,8 @@ webhooks:
- UPDATE
resources:
- runnerdeployments
sideEffects: None
- admissionReviewVersions:
- v1beta1
clientConfig:
- clientConfig:
caBundle: Cg==
service:
name: {{ include "actions-runner-controller.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
@@ -67,31 +62,9 @@ webhooks:
- UPDATE
resources:
- runnerreplicasets
sideEffects: None
- admissionReviewVersions:
- v1beta1
clientConfig:
service:
name: {{ include "actions-runner-controller.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
path: /mutate-runner-set-pod
failurePolicy: Fail
name: mutate-runner-pod.webhook.actions.summerwind.dev
rules:
- apiGroups:
- ""
apiVersions:
- v1
operations:
- CREATE
resources:
- pods
sideEffects: None
objectSelector:
matchLabels:
"actions-runner-controller/inject-registration-token": "true"
---
apiVersion: admissionregistration.k8s.io/v1
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
creationTimestamp: null
@@ -99,9 +72,8 @@ metadata:
annotations:
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "actions-runner-controller.servingCertName" . }}
webhooks:
- admissionReviewVersions:
- v1beta1
clientConfig:
- clientConfig:
caBundle: Cg==
service:
name: {{ include "actions-runner-controller.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
@@ -118,10 +90,8 @@ webhooks:
- UPDATE
resources:
- runners
sideEffects: None
- admissionReviewVersions:
- v1beta1
clientConfig:
- clientConfig:
caBundle: Cg==
service:
name: {{ include "actions-runner-controller.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
@@ -138,10 +108,8 @@ webhooks:
- UPDATE
resources:
- runnerdeployments
sideEffects: None
- admissionReviewVersions:
- v1beta1
clientConfig:
- clientConfig:
caBundle: Cg==
service:
name: {{ include "actions-runner-controller.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
@@ -158,4 +126,3 @@ webhooks:
- UPDATE
resources:
- runnerreplicasets
sideEffects: None

View File

@@ -8,19 +8,10 @@ replicaCount: 1
syncPeriod: 10m
# The controller tries its best not to repeat the duplicate GitHub API call
# within this duration.
# Defaults to syncPeriod - 10s.
#githubAPICacheDuration: 30s
# The URL of your GitHub Enterprise server, if you're using one.
#githubEnterpriseServerURL: https://github.example.com
# Only 1 authentication method can be deployed at a time
# Uncomment the configuration you are applying and fill in the details
authSecret:
create: false
name: "controller-manager"
enabled: false
### GitHub Apps Configuration
#github_app_id: ""
#github_app_installation_id: ""
@@ -28,10 +19,10 @@ authSecret:
### GitHub PAT Configuration
#github_token: ""
dockerRegistryMirror: ""
image:
repository: "summerwind/actions-runner-controller"
actionsRunnerRepositoryAndTag: "summerwind/actions-runner:latest"
repository: summerwind/actions-runner-controller
# Overrides the manager image tag whose default is the chart appVersion if the tag key is commented out
tag: "latest"
dindSidecarRepositoryAndTag: "docker:dind"
pullPolicy: IfNotPresent
@@ -50,14 +41,10 @@ serviceAccount:
podAnnotations: {}
podLabels: {}
podSecurityContext:
{}
podSecurityContext: {}
# fsGroup: 2000
securityContext:
{}
securityContext: {}
# capabilities:
# drop:
# - ALL
@@ -69,18 +56,20 @@ service:
type: ClusterIP
port: 443
metrics:
serviceMonitor: false
serviceMonitorLabels: {}
port: 8443
proxy:
enabled: true
image:
repository: quay.io/brancz/kube-rbac-proxy
tag: v0.10.0
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources:
{}
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
@@ -92,6 +81,13 @@ resources:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
@@ -102,69 +98,3 @@ affinity: {}
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
# PriorityClass: system-cluster-critical
priorityClassName: ""
env:
{}
# http_proxy: "proxy.com:8080"
# https_proxy: "proxy.com:8080"
# no_proxy: ""
scope:
# If true, the controller will only watch custom resources in a single namespace
singleNamespace: false
# If `scope.singleNamespace=true`, the controller will only watch custom resources in this namespace
# The default value is "", which means the namespace of the controller
watchNamespace: ""
githubWebhookServer:
enabled: false
replicaCount: 1
syncPeriod: 10m
secret:
create: false
name: "github-webhook-server"
### GitHub Webhook Configuration
github_webhook_secret_token: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
priorityClassName: ""
service:
type: ClusterIP
annotations: {}
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
#nodePort: someFixedPortForUseWithTerraformCdkCfnEtc
ingress:
enabled: false
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
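
As a worked example of the `githubWebhookServer` block above, a hypothetical override that enables the webhook server, creates its secret, and exposes it through an nginx ingress could look like this (host and token are placeholders):
```yaml
githubWebhookServer:
  enabled: true
  secret:
    create: true
    github_webhook_secret_token: "replace-me"  # placeholder; use your GitHub webhook secret
  ingress:
    enabled: true
    annotations:
      kubernetes.io/ingress.class: nginx
    hosts:
    - host: arc-webhook.example.com            # placeholder host
      paths:
      - path: /
```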

View File

@@ -1,191 +0,0 @@
/*
Copyright 2021 The actions-runner-controller authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"errors"
"flag"
"net/http"
"os"
"sync"
"time"
actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
"github.com/actions-runner-controller/actions-runner-controller/controllers"
zaplib "go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/exec"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
// +kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
const (
logLevelDebug = "debug"
logLevelInfo = "info"
logLevelWarn = "warn"
logLevelError = "error"
)
func init() {
_ = clientgoscheme.AddToScheme(scheme)
_ = actionsv1alpha1.AddToScheme(scheme)
// +kubebuilder:scaffold:scheme
}
func main() {
var (
err error
webhookAddr string
metricsAddr string
// The secret token of the GitHub Webhook. See https://docs.github.com/en/developers/webhooks-and-events/securing-your-webhooks
webhookSecretToken string
watchNamespace string
enableLeaderElection bool
syncPeriod time.Duration
logLevel string
)
webhookSecretToken = os.Getenv("GITHUB_WEBHOOK_SECRET_TOKEN")
flag.StringVar(&webhookAddr, "webhook-addr", ":8000", "The address the webhook endpoint binds to.")
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&watchNamespace, "watch-namespace", "", "The namespace to watch for HorizontalRunnerAutoscalers to scale on webhook. Set to empty to let it watch all namespaces.")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
flag.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled. When you use autoscaling, set this to a lower value, like 10 minutes, because this corresponds to the minimum time to react to a demand change")
flag.StringVar(&logLevel, "log-level", logLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`)
flag.Parse()
if webhookSecretToken == "" {
setupLog.Info("-webhook-secret-token is missing or empty. Create one following https://docs.github.com/en/developers/webhooks-and-events/securing-your-webhooks")
}
if watchNamespace == "" {
setupLog.Info("-watch-namespace is empty. HorizontalRunnerAutoscalers in all the namespaces are watched, cached, and considered as scale targets.")
} else {
setupLog.Info("-watch-namespace is %q. Only HorizontalRunnerAutoscalers in %q are watched, cached, and considered as scale targets.")
}
logger := zap.New(func(o *zap.Options) {
switch logLevel {
case logLevelDebug:
o.Development = true
case logLevelInfo:
lvl := zaplib.NewAtomicLevelAt(zaplib.InfoLevel)
o.Level = &lvl
case logLevelWarn:
lvl := zaplib.NewAtomicLevelAt(zaplib.WarnLevel)
o.Level = &lvl
case logLevelError:
lvl := zaplib.NewAtomicLevelAt(zaplib.ErrorLevel)
o.Level = &lvl
}
})
ctrl.SetLogger(logger)
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
SyncPeriod: &syncPeriod,
LeaderElection: enableLeaderElection,
Namespace: watchNamespace,
MetricsBindAddress: metricsAddr,
Port: 9443,
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
hraGitHubWebhook := &controllers.HorizontalRunnerAutoscalerGitHubWebhook{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("Runner"),
Recorder: nil,
Scheme: mgr.GetScheme(),
SecretKeyBytes: []byte(webhookSecretToken),
Namespace: watchNamespace,
}
if err = hraGitHubWebhook.SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Runner")
os.Exit(1)
}
var wg sync.WaitGroup
ctx, cancel := context.WithCancel(context.Background())
wg.Add(1)
go func() {
defer cancel()
defer wg.Done()
setupLog.Info("starting webhook server")
if err := mgr.Start(ctx); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}()
mux := http.NewServeMux()
mux.HandleFunc("/", hraGitHubWebhook.Handle)
srv := http.Server{
Addr: webhookAddr,
Handler: mux,
}
wg.Add(1)
go func() {
defer cancel()
defer wg.Done()
go func() {
<-ctx.Done()
srv.Shutdown(context.Background())
}()
if err := srv.ListenAndServe(); err != nil {
if !errors.Is(err, http.ErrServerClosed) {
setupLog.Error(err, "problem running http server")
}
}
}()
go func() {
<-ctrl.SetupSignalHandler().Done()
cancel()
}()
wg.Wait()
}

View File

@@ -1,304 +1,133 @@
---
apiVersion: apiextensions.k8s.io/v1
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.6.0
controller-gen.kubebuilder.io/version: v0.3.0
creationTimestamp: null
name: horizontalrunnerautoscalers.actions.summerwind.dev
spec:
additionalPrinterColumns:
- JSONPath: .spec.minReplicas
name: Min
type: number
- JSONPath: .spec.maxReplicas
name: Max
type: number
- JSONPath: .status.desiredReplicas
name: Desired
type: number
group: actions.summerwind.dev
names:
kind: HorizontalRunnerAutoscaler
listKind: HorizontalRunnerAutoscalerList
plural: horizontalrunnerautoscalers
shortNames:
- hra
singular: horizontalrunnerautoscaler
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.minReplicas
name: Min
type: number
- jsonPath: .spec.maxReplicas
name: Max
type: number
- jsonPath: .status.desiredReplicas
name: Desired
type: number
- jsonPath: .status.scheduledOverridesSummary
name: Schedule
type: string
name: v1alpha1
schema:
openAPIV3Schema:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: HorizontalRunnerAutoscalerSpec defines the desired state
of HorizontalRunnerAutoscaler
properties:
capacityReservations:
items:
description: CapacityReservation specifies the number of replicas
temporarily added to the scale target until ExpirationTime.
properties:
expirationTime:
format: date-time
type: string
name:
type: string
replicas:
type: integer
type: object
type: array
maxReplicas:
description: MaxReplicas is the maximum number of replicas the deployment
is allowed to scale
type: integer
metrics:
description: Metrics is the collection of various metric targets to
calculate desired number of runners
items:
properties:
repositoryNames:
description: RepositoryNames is the list of repository names
to be used for calculating the metric. For example, a repository
name is the REPO part of `github.com/USER/REPO`.
items:
type: string
type: array
scaleDownAdjustment:
description: ScaleDownAdjustment is the number of runners removed
on scale-down. You can only specify either ScaleDownFactor
or ScaleDownAdjustment.
type: integer
scaleDownFactor:
description: ScaleDownFactor is the multiplicative factor applied
to the current number of runners used to determine how many
pods should be removed.
type: string
scaleDownThreshold:
description: ScaleDownThreshold is the percentage of busy runners
less than which will trigger the hpa to scale the runners
down.
type: string
scaleUpAdjustment:
description: ScaleUpAdjustment is the number of runners added
on scale-up. You can only specify either ScaleUpFactor or
ScaleUpAdjustment.
type: integer
scaleUpFactor:
description: ScaleUpFactor is the multiplicative factor applied
to the current number of runners used to determine how many
pods should be added.
type: string
scaleUpThreshold:
description: ScaleUpThreshold is the percentage of busy runners
greater than which will trigger the hpa to scale runners up.
type: string
type:
description: Type is the type of metric to be used for autoscaling.
The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
type: string
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment
is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: ScaleDownDelaySecondsAfterScaleOut is the approximate
delay before a scale down once a scale up has happened. Used to prevent flapping
(down->up->down->... loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to the scaled resource, such
as a RunnerDeployment
subresources:
status: {}
validation:
openAPIV3Schema:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: HorizontalRunnerAutoscalerSpec defines the desired state of
HorizontalRunnerAutoscaler
properties:
maxReplicas:
description: MaxReplicas is the maximum number of replicas the deployment
is allowed to scale
type: integer
metrics:
description: Metrics is the collection of various metric targets to
calculate desired number of runners
items:
properties:
kind:
description: Kind is the type of resource being referenced
enum:
- RunnerDeployment
- RunnerSet
repositoryNames:
description: RepositoryNames is the list of repository names to
be used for calculating the metric. For example, a repository
name is the REPO part of `github.com/USER/REPO`.
items:
type: string
type: array
scaleDownFactor:
description: ScaleDownFactor is the multiplicative factor applied
to the current number of runners used to determine how many
pods should be removed.
type: string
name:
description: Name is the name of resource being referenced
scaleDownThreshold:
description: ScaleDownThreshold is the percentage of busy runners
less than which will trigger the hpa to scale the runners down.
type: string
scaleUpFactor:
description: ScaleUpFactor is the multiplicative factor applied
to the current number of runners used to determine how many
pods should be added.
type: string
scaleUpThreshold:
description: ScaleUpThreshold is the percentage of busy runners
greater than which will trigger the hpa to scale runners up.
type: string
type:
description: Type is the type of metric to be used for autoscaling.
The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
type: string
type: object
scaleUpTriggers:
description: "ScaleUpTriggers is an experimental feature to increase
the desired replicas by 1 on each webhook request received by
the webhookBasedAutoscaler. \n This feature requires you to also
enable and deploy the webhookBasedAutoscaler onto your cluster.
\n Note that the added runners remain until the next sync period
at least, and they may or may not be used by GitHub Actions depending
on the timing. They are intended to be used to gain \"resource slack\"
immediately after you receive a webhook from GitHub, so that you
can loosely expect MinReplicas runners to be always available."
items:
properties:
amount:
type: integer
duration:
type: string
githubEvent:
properties:
checkRun:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
properties:
names:
description: Names is a list of GitHub Actions glob
patterns. Any check_run event whose name matches one
of the patterns in the list can trigger autoscaling. Note
that the check_run name seems to equal the job name
you've defined in your Actions workflow YAML file,
so it is very likely that you can use this to
trigger scaling depending on the job.
items:
type: string
type: array
repositories:
description: Repositories is a list of GitHub repositories.
Any check_run event whose repository matches one of
repositories in the list can trigger autoscaling.
items:
type: string
type: array
status:
type: string
types:
items:
type: string
type: array
type: object
pullRequest:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
properties:
branches:
items:
type: string
type: array
types:
items:
type: string
type: array
type: object
push:
description: PushSpec is the condition for triggering scale-up
on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type: object
type: object
type: object
type: array
scheduledOverrides:
description: ScheduledOverrides is the list of ScheduledOverride.
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec
on schedule. The earlier a scheduled override is, the higher it
is prioritized.
items:
description: ScheduledOverride can be used to override a few fields
of HorizontalRunnerAutoscalerSpec on schedule. A schedule can
optionally be recurring, so that the corresponding override happens
every day, week, month, or year.
properties:
endTime:
description: EndTime is the time at which the first override
ends.
format: date-time
type: string
minReplicas:
description: MinReplicas is the number of runners while overriding.
If omitted, it doesn't override minReplicas.
minimum: 0
nullable: true
type: integer
recurrenceRule:
properties:
frequency:
description: Frequency is the name of a predefined interval
of each recurrence. The valid values are "Daily", "Weekly",
"Monthly", and "Yearly". If empty, the corresponding override
happens only once.
enum:
- Daily
- Weekly
- Monthly
- Yearly
type: string
untilTime:
description: UntilTime is the time of the final recurrence.
If empty, the schedule recurs forever.
format: date-time
type: string
type: object
startTime:
description: StartTime is the time at which the first override
starts.
format: date-time
type: string
required:
- endTime
- startTime
type: object
type: array
type: object
status:
properties:
cacheEntries:
items:
properties:
expirationTime:
format: date-time
type: string
key:
type: string
value:
type: integer
type: object
type: array
desiredReplicas:
description: DesiredReplicas is the total number of desired, non-terminated
and latest pods to be set for the primary RunnerSet This doesn't
include outdated pods while upgrading the deployment and replacing
the runnerset.
type: integer
lastSuccessfulScaleOutTime:
format: date-time
nullable: true
type: string
observedGeneration:
description: ObservedGeneration is the most recent generation observed
for the target. It corresponds to e.g. RunnerDeployment's generation,
which is updated on mutation by the API Server.
format: int64
type: integer
scheduledOverridesSummary:
description: ScheduledOverridesSummary is the summary of active and
upcoming scheduled overrides to be shown in e.g. a column of a `kubectl
get hra` output for observability.
type: string
type: object
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment
is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: ScaleDownDelaySecondsAfterScaleOut is the approximate delay
before a scale down once a scale up has happened. Used to prevent flapping (down->up->down->...
loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to the scaled resource, such as
a RunnerDeployment
properties:
name:
type: string
type: object
type: object
status:
properties:
desiredReplicas:
description: DesiredReplicas is the total number of desired, non-terminated
and latest pods to be set for the primary RunnerSet This doesn't include
outdated pods while upgrading the deployment and replacing the runnerset.
type: integer
lastSuccessfulScaleOutTime:
format: date-time
type: string
observedGeneration:
description: ObservedGeneration is the most recent generation observed
for the target. It corresponds to e.g. RunnerDeployment's generation,
which is updated on mutation by the API Server.
format: int64
type: integer
type: object
type: object
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
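
To make the HorizontalRunnerAutoscaler schema above concrete, here is a minimal sketch that constructs one in Go using the v1alpha1 types referenced elsewhere in this diff. The values are illustrative, and the Go field and type names (ScaleTargetRef, MetricSpec, the threshold and factor fields) are assumed to mirror the CRD fields shown above.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
)

func intPtr(v int) *int { return &v }

func main() {
	// Assumed field names mirroring the schema: scaleTargetRef, minReplicas,
	// maxReplicas, and metrics with a PercentageRunnersBusy entry.
	hra := v1alpha1.HorizontalRunnerAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "example-hra", Namespace: "default"},
		Spec: v1alpha1.HorizontalRunnerAutoscalerSpec{
			ScaleTargetRef: v1alpha1.ScaleTargetRef{Name: "example-runnerdeploy"},
			MinReplicas:    intPtr(1),
			MaxReplicas:    intPtr(5),
			Metrics: []v1alpha1.MetricSpec{{
				Type:               v1alpha1.AutoscalingMetricTypePercentageRunnersBusy,
				ScaleUpThreshold:   "0.75", // scale up when >= 75% of runners are busy
				ScaleDownThreshold: "0.25", // scale down when < 25% are busy
				ScaleUpFactor:      "2",    // double the runners on scale-up
				ScaleDownFactor:    "0.5",  // halve them on scale-down
			}},
		},
	}
	fmt.Printf("%s scales %s between %d and %d replicas\n",
		hra.Name, hra.Spec.ScaleTargetRef.Name, *hra.Spec.MinReplicas, *hra.Spec.MaxReplicas)
}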

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:

View File

@@ -1,18 +1,17 @@
# The following patch enables conversion webhook for CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: runners.actions.summerwind.dev
spec:
conversion:
strategy: Webhook
webhook:
clientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert
webhookClientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert
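
As the comments above say, Cg== is simply the base64 encoding of a single newline: a non-blank placeholder that the apiserver accepts until cert-manager (or a patch) injects the real CA bundle. A quick check confirms it:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// base64("\n") == "Cg==", a non-blank placeholder caBundle value.
	fmt.Println(base64.StdEncoding.EncodeToString([]byte("\n"))) // prints Cg==
}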

View File

@@ -10,7 +10,7 @@ spec:
spec:
containers:
- name: kube-rbac-proxy
image: quay.io/brancz/kube-rbac-proxy:v0.10.0
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1
args:
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8080/"

View File

@@ -1,13 +1,13 @@
# This patch adds annotations to the admission webhook config, and
# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize.
apiVersion: admissionregistration.k8s.io/v1
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
name: mutating-webhook-configuration
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
---
apiVersion: admissionregistration.k8s.io/v1
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
name: validating-webhook-configuration

View File

@@ -134,67 +134,6 @@ rules:
- get
- patch
- update
- apiGroups:
- actions.summerwind.dev
resources:
- runnersets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- runnersets/finalizers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- runnersets/status
verbs:
- get
- patch
- update
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- statefulsets/status
verbs:
- get
- patch
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- list
- update
- apiGroups:
- ""
resources:

View File

@@ -3,4 +3,4 @@ kind: Runner
metadata:
name: summerwind-actions-runner-controller
spec:
repository: actions-runner-controller/actions-runner-controller
repository: summerwind/actions-runner-controller

View File

@@ -6,4 +6,4 @@ spec:
replicas: 2
template:
spec:
repository: actions-runner-controller/actions-runner-controller
repository: summerwind/actions-runner-controller

View File

@@ -6,4 +6,4 @@ spec:
replicas: 2
template:
spec:
repository: actions-runner-controller/actions-runner-controller
repository: summerwind/actions-runner-controller

View File

@@ -1,14 +1,13 @@
---
apiVersion: admissionregistration.k8s.io/v1
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
creationTimestamp: null
name: mutating-webhook-configuration
webhooks:
- admissionReviewVersions:
- v1beta1
clientConfig:
- clientConfig:
caBundle: Cg==
service:
name: webhook-service
namespace: system
@@ -25,10 +24,8 @@ webhooks:
- UPDATE
resources:
- runners
sideEffects: None
- admissionReviewVersions:
- v1beta1
clientConfig:
- clientConfig:
caBundle: Cg==
service:
name: webhook-service
namespace: system
@@ -45,10 +42,8 @@ webhooks:
- UPDATE
resources:
- runnerdeployments
sideEffects: None
- admissionReviewVersions:
- v1beta1
clientConfig:
- clientConfig:
caBundle: Cg==
service:
name: webhook-service
namespace: system
@@ -65,37 +60,16 @@ webhooks:
- UPDATE
resources:
- runnerreplicasets
sideEffects: None
- admissionReviewVersions:
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
path: /mutate-runner-set-pod
failurePolicy: Ignore
name: mutate-runner-pod.webhook.actions.summerwind.dev
rules:
- apiGroups:
- ""
apiVersions:
- v1
operations:
- CREATE
resources:
- pods
sideEffects: None
---
apiVersion: admissionregistration.k8s.io/v1
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
creationTimestamp: null
name: validating-webhook-configuration
webhooks:
- admissionReviewVersions:
- v1beta1
clientConfig:
- clientConfig:
caBundle: Cg==
service:
name: webhook-service
namespace: system
@@ -112,10 +86,8 @@ webhooks:
- UPDATE
resources:
- runners
sideEffects: None
- admissionReviewVersions:
- v1beta1
clientConfig:
- clientConfig:
caBundle: Cg==
service:
name: webhook-service
namespace: system
@@ -132,10 +104,8 @@ webhooks:
- UPDATE
resources:
- runnerdeployments
sideEffects: None
- admissionReviewVersions:
- v1beta1
clientConfig:
- clientConfig:
caBundle: Cg==
service:
name: webhook-service
namespace: system
@@ -152,4 +122,3 @@ webhooks:
- UPDATE
resources:
- runnerreplicasets
sideEffects: None

View File

@@ -7,9 +7,9 @@ import (
"math"
"strconv"
"strings"
"time"
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
@@ -19,48 +19,7 @@ const (
defaultScaleDownFactor = 0.7
)
func getValueAvailableAt(now time.Time, from, to *time.Time, reservedValue int) *int {
if to != nil && now.After(*to) {
return nil
}
if from != nil && now.Before(*from) {
return nil
}
return &reservedValue
}
func (r *HorizontalRunnerAutoscalerReconciler) fetchSuggestedReplicasFromCache(hra v1alpha1.HorizontalRunnerAutoscaler) *int {
var entry *v1alpha1.CacheEntry
for i := range hra.Status.CacheEntries {
ent := hra.Status.CacheEntries[i]
if ent.Key != v1alpha1.CacheEntryKeyDesiredReplicas {
continue
}
if !time.Now().Before(ent.ExpirationTime.Time) {
continue
}
entry = &ent
break
}
if entry != nil {
v := getValueAvailableAt(time.Now(), nil, &entry.ExpirationTime.Time, entry.Value)
if v != nil {
return v
}
}
return nil
}
func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
func (r *HorizontalRunnerAutoscalerReconciler) determineDesiredReplicas(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
if hra.Spec.MinReplicas == nil {
return nil, fmt.Errorf("horizontalrunnerautoscaler %s/%s is missing minReplicas", hra.Namespace, hra.Name)
} else if hra.Spec.MaxReplicas == nil {
@@ -68,87 +27,31 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTa
}
metrics := hra.Spec.Metrics
numMetrics := len(metrics)
if numMetrics == 0 {
if len(hra.Spec.ScaleUpTriggers) == 0 {
return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, nil)
}
return nil, nil
} else if numMetrics > 2 {
return nil, fmt.Errorf("Too many autoscaling metrics configured: It must be 0 to 2, but got %d", numMetrics)
if len(metrics) == 0 || metrics[0].Type == v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns {
return r.calculateReplicasByQueuedAndInProgressWorkflowRuns(rd, hra)
} else if metrics[0].Type == v1alpha1.AutoscalingMetricTypePercentageRunnersBusy {
return r.calculateReplicasByPercentageRunnersBusy(rd, hra)
} else {
return nil, fmt.Errorf("validting autoscaling metrics: unsupported metric type %q", metrics[0].Type)
}
primaryMetric := metrics[0]
primaryMetricType := primaryMetric.Type
var (
suggested *int
err error
)
switch primaryMetricType {
case v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns:
suggested, err = r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, &primaryMetric)
case v1alpha1.AutoscalingMetricTypePercentageRunnersBusy:
suggested, err = r.suggestReplicasByPercentageRunnersBusy(st, hra, primaryMetric)
default:
return nil, fmt.Errorf("validting autoscaling metrics: unsupported metric type %q", primaryMetric)
}
if err != nil {
return nil, err
}
if suggested != nil && *suggested > 0 {
return suggested, nil
}
if len(metrics) == 1 {
// This is never supposed to happen, but just in case:
// Fall-back to `minReplicas + capacityReservedThroughWebhook`.
return nil, nil
}
// At this point, we are sure that there are exactly 2 Metrics entries.
fallbackMetric := metrics[1]
fallbackMetricType := fallbackMetric.Type
if primaryMetricType != v1alpha1.AutoscalingMetricTypePercentageRunnersBusy ||
fallbackMetricType != v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns {
return nil, fmt.Errorf(
"invalid HRA Spec: Metrics[0] of %s cannot be combined with Metrics[1] of %s: The only allowed combination is 0=PercentageRunnersBusy and 1=TotalNumberOfQueuedAndInProgressWorkflowRuns",
primaryMetricType, fallbackMetricType,
)
}
return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, &fallbackMetric)
}
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgressWorkflowRuns(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics *v1alpha1.MetricSpec) (*int, error) {
func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByQueuedAndInProgressWorkflowRuns(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
var repos [][]string
repoID := st.repo
metrics := hra.Spec.Metrics
repoID := rd.Spec.Template.Spec.Repository
if repoID == "" {
orgName := st.org
orgName := rd.Spec.Template.Spec.Organization
if orgName == "" {
return nil, fmt.Errorf("asserting runner deployment spec to detect bug: spec.template.organization should not be empty on this code path")
}
// In case it's an organizational runners deployment without any scaling metrics defined,
// we assume that the desired replicas should always be `minReplicas + capacityReservedThroughWebhook`.
// See https://github.com/actions-runner-controller/actions-runner-controller/issues/377#issuecomment-793372693
if metrics == nil {
return nil, nil
}
if len(metrics.RepositoryNames) == 0 {
if len(metrics[0].RepositoryNames) == 0 {
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].repositoryNames is required and must have one more more entries for organizational runner deployment")
}
for _, repoName := range metrics.RepositoryNames {
for _, repoName := range metrics[0].RepositoryNames {
repos = append(repos, []string{orgName, repoName})
}
} else {
@@ -193,12 +96,12 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
for _, repo := range repos {
user, repoName := repo[0], repo[1]
workflowRuns, err := r.GitHubClient.ListRepositoryWorkflowRuns(context.TODO(), user, repoName)
list, _, err := r.GitHubClient.Actions.ListRepositoryWorkflowRuns(context.TODO(), user, repoName, nil)
if err != nil {
return nil, err
}
for _, run := range workflowRuns {
for _, run := range list.WorkflowRuns {
total++
// As of May 2020, there are only 3 statuses.
@@ -218,25 +121,43 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
}
}
minReplicas := *hra.Spec.MinReplicas
maxReplicas := *hra.Spec.MaxReplicas
necessaryReplicas := queued + inProgress
var desiredReplicas int
if necessaryReplicas < minReplicas {
desiredReplicas = minReplicas
} else if necessaryReplicas > maxReplicas {
desiredReplicas = maxReplicas
} else {
desiredReplicas = necessaryReplicas
}
rd.Status.Replicas = &desiredReplicas
replicas := desiredReplicas
r.Log.V(1).Info(
fmt.Sprintf("Suggested desired replicas of %d by TotalNumberOfQueuedAndInProgressWorkflowRuns", necessaryReplicas),
"Calculated desired replicas",
"computed_replicas_desired", desiredReplicas,
"spec_replicas_min", minReplicas,
"spec_replicas_max", maxReplicas,
"workflow_runs_completed", completed,
"workflow_runs_in_progress", inProgress,
"workflow_runs_queued", queued,
"workflow_runs_unknown", unknown,
"namespace", hra.Namespace,
"kind", st.kind,
"name", st.st,
"horizontal_runner_autoscaler", hra.Name,
)
return &necessaryReplicas, nil
return &replicas, nil
}
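
The TotalNumberOfQueuedAndInProgressWorkflowRuns path above reduces to clamping queued+inProgress into [minReplicas, maxReplicas]. Here is a standalone sketch of that same arithmetic; the function and parameter names are mine, not the controller's.

package main

import "fmt"

// desiredByQueuedAndInProgress mirrors the clamping above: demand is the
// number of queued plus in-progress workflow runs, bounded by min and max.
func desiredByQueuedAndInProgress(queued, inProgress, min, max int) int {
	necessary := queued + inProgress
	if necessary < min {
		return min
	}
	if necessary > max {
		return max
	}
	return necessary
}

func main() {
	// One queued and two in-progress runs with min=2, max=3 yield 3,
	// matching the "3 demanded, max at 3" test case later in this diff.
	fmt.Println(desiredByQueuedAndInProgress(1, 2, 2, 3)) // 3
}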
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunnersBusy(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics v1alpha1.MetricSpec) (*int, error) {
func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunnersBusy(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
ctx := context.Background()
orgName := rd.Spec.Template.Spec.Organization
minReplicas := *hra.Spec.MinReplicas
maxReplicas := *hra.Spec.MaxReplicas
metrics := hra.Spec.Metrics[0]
scaleUpThreshold := defaultScaleUpThreshold
scaleDownThreshold := defaultScaleDownThreshold
scaleUpFactor := defaultScaleUpFactor
@@ -257,34 +178,14 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
scaleDownThreshold = sdt
}
scaleUpAdjustment := metrics.ScaleUpAdjustment
if scaleUpAdjustment != 0 {
if metrics.ScaleUpAdjustment < 0 {
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].scaleUpAdjustment cannot be lower than 0")
}
if metrics.ScaleUpFactor != "" {
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[]: scaleUpAdjustment and scaleUpFactor cannot be specified together")
}
} else if metrics.ScaleUpFactor != "" {
if metrics.ScaleUpFactor != "" {
suf, err := strconv.ParseFloat(metrics.ScaleUpFactor, 64)
if err != nil {
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].scaleUpFactor cannot be parsed into a float64")
}
scaleUpFactor = suf
}
scaleDownAdjustment := metrics.ScaleDownAdjustment
if scaleDownAdjustment != 0 {
if metrics.ScaleDownAdjustment < 0 {
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].scaleDownAdjustment cannot be lower than 0")
}
if metrics.ScaleDownFactor != "" {
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[]: scaleDownAdjustment and scaleDownFactor cannot be specified together")
}
} else if metrics.ScaleDownFactor != "" {
if metrics.ScaleDownFactor != "" {
sdf, err := strconv.ParseFloat(metrics.ScaleDownFactor, 64)
if err != nil {
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].scaleDownFactor cannot be parsed into a float64")
@@ -292,91 +193,57 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
scaleDownFactor = sdf
}
runnerMap, err := st.getRunnerMap()
if err != nil {
// Return the list of runners in the namespace; the Horizontal Runner Autoscaler should only be responsible for scaling resources in its own namespace.
var runnerList v1alpha1.RunnerList
if err := r.List(ctx, &runnerList, client.InNamespace(rd.Namespace)); err != nil {
return nil, err
}
var (
enterprise = st.enterprise
organization = st.org
repository = st.repo
)
runnerMap := make(map[string]struct{})
for _, items := range runnerList.Items {
runnerMap[items.Name] = struct{}{}
}
// ListRunners will return all runners managed by GitHub, not restricted to the namespace
runners, err := r.GitHubClient.ListRunners(
ctx,
enterprise,
organization,
repository)
runners, err := r.GitHubClient.ListRunners(ctx, orgName, "")
if err != nil {
return nil, err
}
var desiredReplicasBefore int
if v := st.replicas; v == nil {
desiredReplicasBefore = 1
} else {
desiredReplicasBefore = *v
}
var (
numRunners int
numRunnersRegistered int
numRunnersBusy int
)
numRunners = len(runnerMap)
numRunners := len(runnerList.Items)
numRunnersBusy := 0
for _, runner := range runners {
if _, ok := runnerMap[*runner.Name]; ok {
numRunnersRegistered++
if runner.GetBusy() {
numRunnersBusy++
}
if _, ok := runnerMap[*runner.Name]; ok && runner.GetBusy() {
numRunnersBusy++
}
}
var desiredReplicas int
fractionBusy := float64(numRunnersBusy) / float64(desiredReplicasBefore)
fractionBusy := float64(numRunnersBusy) / float64(numRunners)
if fractionBusy >= scaleUpThreshold {
if scaleUpAdjustment > 0 {
desiredReplicas = desiredReplicasBefore + scaleUpAdjustment
} else {
desiredReplicas = int(math.Ceil(float64(desiredReplicasBefore) * scaleUpFactor))
}
desiredReplicas = int(math.Ceil(float64(numRunners) * scaleUpFactor))
} else if fractionBusy < scaleDownThreshold {
if scaleDownAdjustment > 0 {
desiredReplicas = desiredReplicasBefore - scaleDownAdjustment
} else {
desiredReplicas = int(float64(desiredReplicasBefore) * scaleDownFactor)
}
desiredReplicas = int(float64(numRunners) * scaleDownFactor)
} else {
desiredReplicas = *st.replicas
desiredReplicas = *rd.Spec.Replicas
}
// NOTES for operators:
//
// - num_runners can be up to twice as large as replicas_desired_before while
// the runnerdeployment controller is replacing RunnerReplicaSet for runner update.
if desiredReplicas < minReplicas {
desiredReplicas = minReplicas
} else if desiredReplicas > maxReplicas {
desiredReplicas = maxReplicas
}
r.Log.V(1).Info(
fmt.Sprintf("Suggested desired replicas of %d by PercentageRunnersBusy", desiredReplicas),
"replicas_desired_before", desiredReplicasBefore,
"replicas_desired", desiredReplicas,
"Calculated desired replicas",
"computed_replicas_desired", desiredReplicas,
"spec_replicas_min", minReplicas,
"spec_replicas_max", maxReplicas,
"current_replicas", rd.Spec.Replicas,
"num_runners", numRunners,
"num_runners_registered", numRunnersRegistered,
"num_runners_busy", numRunnersBusy,
"namespace", hra.Namespace,
"kind", st.kind,
"name", st.st,
"horizontal_runner_autoscaler", hra.Name,
"enterprise", enterprise,
"organization", organization,
"repository", repository,
)
return &desiredReplicas, nil
rd.Status.Replicas = &desiredReplicas
replicas := desiredReplicas
return &replicas, nil
}
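
Likewise, the PercentageRunnersBusy path boils down to: compute the busy fraction, multiply by a scale factor when a threshold is crossed (the newer code can use fixed adjustments instead), then clamp to [minReplicas, maxReplicas]. A self-contained sketch under those assumptions, with illustrative factor values rather than the package defaults:

package main

import (
	"fmt"
	"math"
)

// desiredByPercentageBusy mirrors the factor-based decision above; the
// fixed scaleUpAdjustment/scaleDownAdjustment path is analogous.
func desiredByPercentageBusy(current, busy, min, max int, upThreshold, downThreshold, upFactor, downFactor float64) int {
	fractionBusy := float64(busy) / float64(current)
	desired := current
	if fractionBusy >= upThreshold {
		desired = int(math.Ceil(float64(current) * upFactor))
	} else if fractionBusy < downThreshold {
		desired = int(float64(current) * downFactor)
	}
	if desired < min {
		return min
	}
	if desired > max {
		return max
	}
	return desired
}

func main() {
	// With 4 runners of which 3 are busy, upThreshold 0.75 and upFactor 1.5
	// (illustrative values), the busy fraction is exactly 0.75, so the
	// suggestion is ceil(4*1.5) = 6, clamped here to max=5.
	fmt.Println(desiredByPercentageBusy(4, 3, 1, 5, 0.75, 0.3, 1.5, 0.7)) // 5
}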

View File

@@ -1,15 +1,14 @@
package controllers
import (
"context"
"fmt"
"net/http/httptest"
"net/url"
"testing"
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
"github.com/actions-runner-controller/actions-runner-controller/github"
"github.com/actions-runner-controller/actions-runner-controller/github/fake"
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
"github.com/summerwind/actions-runner-controller/github"
"github.com/summerwind/actions-runner-controller/github/fake"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
@@ -41,18 +40,14 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
metav1Now := metav1.Now()
testcases := []struct {
repo string
org string
fixed *int
max *int
min *int
sReplicas *int
sTime *metav1.Time
workflowRuns string
workflowRuns_queued string
workflowRuns_in_progress string
repo string
org string
fixed *int
max *int
min *int
sReplicas *int
sTime *metav1.Time
workflowRuns string
workflowJobs map[int]string
want int
err string
@@ -60,107 +55,87 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
// Legacy functionality
// 3 demanded, max at 3
{
repo: "test/valid",
min: intPtr(2),
max: intPtr(3),
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
want: 3,
repo: "test/valid",
min: intPtr(2),
max: intPtr(3),
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
want: 3,
},
// 2 demanded, max at 3, currently 3, delay scaling down due to grace period
{
repo: "test/valid",
min: intPtr(2),
max: intPtr(3),
sReplicas: intPtr(3),
sTime: &metav1Now,
workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
want: 3,
repo: "test/valid",
min: intPtr(2),
max: intPtr(3),
sReplicas: intPtr(3),
sTime: &metav1Now,
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
want: 3,
},
// 3 demanded, max at 2
{
repo: "test/valid",
min: intPtr(2),
max: intPtr(2),
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
want: 2,
repo: "test/valid",
min: intPtr(2),
max: intPtr(2),
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
want: 2,
},
// 2 demanded, min at 2
{
repo: "test/valid",
min: intPtr(2),
max: intPtr(3),
workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
want: 2,
repo: "test/valid",
min: intPtr(2),
max: intPtr(3),
workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
want: 2,
},
// 1 demanded, min at 2
{
repo: "test/valid",
min: intPtr(2),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
want: 2,
repo: "test/valid",
min: intPtr(2),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
want: 2,
},
// 1 demanded, min at 2
{
repo: "test/valid",
min: intPtr(2),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
want: 2,
repo: "test/valid",
min: intPtr(2),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
want: 2,
},
// 1 demanded, min at 1
{
repo: "test/valid",
min: intPtr(1),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
want: 1,
repo: "test/valid",
min: intPtr(1),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
want: 1,
},
// 1 demanded, min at 1
{
repo: "test/valid",
min: intPtr(1),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
want: 1,
repo: "test/valid",
min: intPtr(1),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
want: 1,
},
// fixed at 3
{
repo: "test/valid",
min: intPtr(1),
max: intPtr(3),
fixed: intPtr(3),
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}]}"`,
want: 3,
repo: "test/valid",
min: intPtr(1),
max: intPtr(3),
fixed: intPtr(3),
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
want: 3,
},
// Job-level autoscaling
// 5 requested from 3 workflows
{
repo: "test/valid",
min: intPtr(2),
max: intPtr(10),
workflowRuns: `{"total_count": 4, "workflow_runs":[{"id": 1, "status":"queued"}, {"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"id": 1, "status":"queued"}]}"`,
workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}]}"`,
repo: "test/valid",
min: intPtr(2),
max: intPtr(10),
workflowRuns: `{"total_count": 4, "workflow_runs":[{"id": 1, "status":"queued"}, {"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
workflowJobs: map[int]string{
1: `{"jobs": [{"status":"queued"}, {"status":"queued"}]}`,
2: `{"jobs": [{"status": "in_progress"}, {"status":"completed"}]}`,
@@ -182,11 +157,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
_ = v1alpha1.AddToScheme(scheme)
t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
server := fake.NewServer(
fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns, tc.workflowRuns_queued, tc.workflowRuns_in_progress),
fake.WithListWorkflowJobsResponse(200, tc.workflowJobs),
fake.WithListRunnersResponse(200, fake.RunnersListBody),
)
server := fake.NewServer(fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns), fake.WithListWorkflowJobsResponse(200, tc.workflowJobs))
defer server.Close()
client := newGithubClient(server)
@@ -204,15 +175,13 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
Spec: v1alpha1.RunnerDeploymentSpec{
Template: v1alpha1.RunnerTemplate{
Spec: v1alpha1.RunnerSpec{
RunnerConfig: v1alpha1.RunnerConfig{
Repository: tc.repo,
},
Repository: tc.repo,
},
},
Replicas: tc.fixed,
},
Status: v1alpha1.RunnerDeploymentStatus{
DesiredReplicas: tc.sReplicas,
Replicas: tc.sReplicas,
},
}
@@ -227,14 +196,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
},
}
minReplicas, _, _, err := h.getMinReplicas(log, metav1Now.Time, hra)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
st := h.scaleTargetFromRD(context.Background(), rd)
got, _, _, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
got, err := h.computeReplicas(rd, hra)
if err != nil {
if tc.err == "" {
t.Fatalf("unexpected error: expected none, got %v", err)
@@ -244,8 +206,12 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
return
}
if got != tc.want {
t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, got)
if got == nil {
t.Fatalf("unexpected value of rs.Spec.Replicas: nil")
}
if *got != tc.want {
t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, *got)
}
})
}
@@ -258,157 +224,129 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
metav1Now := metav1.Now()
testcases := []struct {
repos []string
org string
fixed *int
max *int
min *int
sReplicas *int
sTime *metav1.Time
workflowRuns string
workflowRuns_queued string
workflowRuns_in_progress string
repos []string
org string
fixed *int
max *int
min *int
sReplicas *int
sTime *metav1.Time
workflowRuns string
workflowJobs map[int]string
want int
err string
}{
// 3 demanded, max at 3
{
org: "test",
repos: []string{"valid"},
min: intPtr(2),
max: intPtr(3),
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
want: 3,
org: "test",
repos: []string{"valid"},
min: intPtr(2),
max: intPtr(3),
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
want: 3,
},
// 2 demanded, max at 3, currently 3, delay scaling down due to grace period
{
org: "test",
repos: []string{"valid"},
min: intPtr(2),
max: intPtr(3),
sReplicas: intPtr(3),
sTime: &metav1Now,
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
want: 3,
org: "test",
repos: []string{"valid"},
min: intPtr(2),
max: intPtr(3),
sReplicas: intPtr(3),
sTime: &metav1Now,
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
want: 3,
},
// 3 demanded, max at 2
{
org: "test",
repos: []string{"valid"},
min: intPtr(2),
max: intPtr(2),
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
want: 2,
org: "test",
repos: []string{"valid"},
min: intPtr(2),
max: intPtr(2),
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
want: 2,
},
// 2 demanded, min at 2
{
org: "test",
repos: []string{"valid"},
min: intPtr(2),
max: intPtr(3),
workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
want: 2,
org: "test",
repos: []string{"valid"},
min: intPtr(2),
max: intPtr(3),
workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
want: 2,
},
// 1 demanded, min at 2
{
org: "test",
repos: []string{"valid"},
min: intPtr(2),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
want: 2,
org: "test",
repos: []string{"valid"},
min: intPtr(2),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
want: 2,
},
// 1 demanded, min at 2
{
org: "test",
repos: []string{"valid"},
min: intPtr(2),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
want: 2,
org: "test",
repos: []string{"valid"},
min: intPtr(2),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
want: 2,
},
// 1 demanded, min at 1
{
org: "test",
repos: []string{"valid"},
min: intPtr(1),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
want: 1,
org: "test",
repos: []string{"valid"},
min: intPtr(1),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
want: 1,
},
// 1 demanded, min at 1
{
org: "test",
repos: []string{"valid"},
min: intPtr(1),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
want: 1,
org: "test",
repos: []string{"valid"},
min: intPtr(1),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
want: 1,
},
// fixed at 3
{
org: "test",
repos: []string{"valid"},
fixed: intPtr(1),
min: intPtr(1),
max: intPtr(3),
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"},{"status":"in_progress"},{"status":"in_progress"}]}"`,
want: 3,
org: "test",
repos: []string{"valid"},
fixed: intPtr(1),
min: intPtr(1),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
want: 3,
},
// org runner, fixed at 3
{
org: "test",
repos: []string{"valid"},
fixed: intPtr(1),
min: intPtr(1),
max: intPtr(3),
workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"},{"status":"in_progress"},{"status":"in_progress"}]}"`,
want: 3,
org: "test",
repos: []string{"valid"},
fixed: intPtr(1),
min: intPtr(1),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
want: 3,
},
// org runner, 1 demanded, min at 1, no repos
{
org: "test",
min: intPtr(1),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
err: "validating autoscaling metrics: spec.autoscaling.metrics[].repositoryNames is required and must have one more more entries for organizational runner deployment",
org: "test",
min: intPtr(1),
max: intPtr(3),
workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
err: "validating autoscaling metrics: spec.autoscaling.metrics[].repositoryNames is required and must have one more more entries for organizational runner deployment",
},
// Job-level autoscaling
// 5 requested from 3 workflows
{
org: "test",
repos: []string{"valid"},
min: intPtr(2),
max: intPtr(10),
workflowRuns: `{"total_count": 4, "workflow_runs":[{"id": 1, "status":"queued"}, {"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"id": 1, "status":"queued"}]}"`,
workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
org: "test",
repos: []string{"valid"},
min: intPtr(2),
max: intPtr(10),
workflowRuns: `{"total_count": 4, "workflow_runs":[{"id": 1, "status":"queued"}, {"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
workflowJobs: map[int]string{
1: `{"jobs": [{"status":"queued"}, {"status":"queued"}]}`,
2: `{"jobs": [{"status": "in_progress"}, {"status":"completed"}]}`,
@@ -430,13 +368,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
_ = v1alpha1.AddToScheme(scheme)
t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
t.Helper()
server := fake.NewServer(
fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns, tc.workflowRuns_queued, tc.workflowRuns_in_progress),
fake.WithListWorkflowJobsResponse(200, tc.workflowJobs),
fake.WithListRunnersResponse(200, fake.RunnersListBody),
)
server := fake.NewServer(fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns), fake.WithListWorkflowJobsResponse(200, tc.workflowJobs))
defer server.Close()
client := newGithubClient(server)
@@ -451,27 +383,15 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
Name: "testrd",
},
Spec: v1alpha1.RunnerDeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"foo": "bar",
},
},
Template: v1alpha1.RunnerTemplate{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1alpha1.RunnerSpec{
RunnerConfig: v1alpha1.RunnerConfig{
Organization: tc.org,
},
Organization: tc.org,
},
},
Replicas: tc.fixed,
},
Status: v1alpha1.RunnerDeploymentStatus{
DesiredReplicas: tc.sReplicas,
Replicas: tc.sReplicas,
},
}
@@ -495,14 +415,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
},
}
minReplicas, _, _, err := h.getMinReplicas(log, metav1Now.Time, hra)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
st := h.scaleTargetFromRD(context.Background(), rd)
got, _, _, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
got, err := h.computeReplicas(rd, hra)
if err != nil {
if tc.err == "" {
t.Fatalf("unexpected error: expected none, got %v", err)
@@ -512,8 +425,12 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
return
}
if got != tc.want {
t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, got)
if got == nil {
t.Fatalf("unexpected value of rs.Spec.Replicas: nil, wanted %v", tc.want)
}
if *got != tc.want {
t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, *got)
}
})
}

View File

@@ -1,672 +0,0 @@
/*
Copyright 2020 The actions-runner-controller authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/go-logr/logr"
gogithub "github.com/google/go-github/v37/github"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
)
const (
scaleTargetKey = "scaleTarget"
)
// HorizontalRunnerAutoscalerGitHubWebhook autoscales a HorizontalRunnerAutoscaler and the RunnerDeployment on each
// GitHub Webhook received
type HorizontalRunnerAutoscalerGitHubWebhook struct {
client.Client
Log logr.Logger
Recorder record.EventRecorder
Scheme *runtime.Scheme
// SecretKeyBytes is the byte representation of the Webhook secret token
// the administrator generated and specified in the GitHub Web UI.
SecretKeyBytes []byte
// Namespace is the namespace to watch for HorizontalRunnerAutoscalers to be
// scaled on Webhook.
// Set to empty to let it watch all namespaces.
Namespace string
Name string
}
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(_ context.Context, request reconcile.Request) (reconcile.Result, error) {
return ctrl.Result{}, nil
}
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/finalizers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.ResponseWriter, r *http.Request) {
var (
ok bool
err error
)
defer func() {
if !ok {
w.WriteHeader(http.StatusInternalServerError)
if err != nil {
msg := err.Error()
if written, err := w.Write([]byte(msg)); err != nil {
autoscaler.Log.Error(err, "failed writing http error response", "msg", msg, "written", written)
}
}
}
}()
defer func() {
if r.Body != nil {
r.Body.Close()
}
}()
// respond ok to GET / e.g. for health check
if r.Method == http.MethodGet {
fmt.Fprintln(w, "webhook server is running")
return
}
var payload []byte
if len(autoscaler.SecretKeyBytes) > 0 {
payload, err = gogithub.ValidatePayload(r, autoscaler.SecretKeyBytes)
if err != nil {
autoscaler.Log.Error(err, "error validating request body")
return
}
} else {
payload, err = ioutil.ReadAll(r.Body)
if err != nil {
autoscaler.Log.Error(err, "error reading request body")
return
}
}
webhookType := gogithub.WebHookType(r)
event, err := gogithub.ParseWebHook(webhookType, payload)
if err != nil {
var s string
if payload != nil {
s = string(payload)
}
autoscaler.Log.Error(err, "could not parse webhook", "webhookType", webhookType, "payload", s)
return
}
var target *ScaleTarget
log := autoscaler.Log.WithValues(
"event", webhookType,
"hookID", r.Header.Get("X-GitHub-Hook-ID"),
"delivery", r.Header.Get("X-GitHub-Delivery"),
)
switch e := event.(type) {
case *gogithub.PushEvent:
target, err = autoscaler.getScaleUpTarget(
context.TODO(),
log,
e.Repo.GetName(),
e.Repo.Owner.GetLogin(),
e.Repo.Owner.GetType(),
autoscaler.MatchPushEvent(e),
)
case *gogithub.PullRequestEvent:
target, err = autoscaler.getScaleUpTarget(
context.TODO(),
log,
e.Repo.GetName(),
e.Repo.Owner.GetLogin(),
e.Repo.Owner.GetType(),
autoscaler.MatchPullRequestEvent(e),
)
if pullRequest := e.PullRequest; pullRequest != nil {
log = log.WithValues(
"pullRequest.base.ref", e.PullRequest.Base.GetRef(),
"action", e.GetAction(),
)
}
case *gogithub.CheckRunEvent:
target, err = autoscaler.getScaleUpTarget(
context.TODO(),
log,
e.Repo.GetName(),
e.Repo.Owner.GetLogin(),
e.Repo.Owner.GetType(),
autoscaler.MatchCheckRunEvent(e),
)
if checkRun := e.GetCheckRun(); checkRun != nil {
log = log.WithValues(
"checkRun.status", checkRun.GetStatus(),
"action", e.GetAction(),
)
}
case *gogithub.WorkflowJobEvent:
if workflowJob := e.GetWorkflowJob(); workflowJob != nil {
log = log.WithValues(
"workflowJob.status", workflowJob.GetStatus(),
"workflowJob.labels", workflowJob.Labels,
"repository.name", e.Repo.GetName(),
"repository.owner.login", e.Repo.Owner.GetLogin(),
"repository.owner.type", e.Repo.Owner.GetType(),
"action", e.GetAction(),
)
}
labels := e.WorkflowJob.Labels
switch e.GetAction() {
case "queued", "completed":
target, err = autoscaler.getJobScaleUpTargetForRepoOrOrg(
context.TODO(),
log,
e.Repo.GetName(),
e.Repo.Owner.GetLogin(),
e.Repo.Owner.GetType(),
labels,
)
if target != nil {
if e.GetAction() == "queued" {
target.Amount = 1
} else if e.GetAction() == "completed" {
// A negative amount is processed in the tryScale func as a scale-down request
// that erases the oldest CapacityReservation with the same amount.
// If the first CapacityReservation was with Replicas=1, this negative scale target erases that,
// so that the resulting desired replicas decreases by 1.
target.Amount = -1
}
}
default:
}
case *gogithub.PingEvent:
ok = true
w.WriteHeader(http.StatusOK)
msg := "pong"
if written, err := w.Write([]byte(msg)); err != nil {
log.Error(err, "failed writing http response", "msg", msg, "written", written)
}
log.Info("received ping event")
return
default:
log.Info("unknown event type", "eventType", webhookType)
return
}
if err != nil {
log.Error(err, "handling check_run event")
return
}
if target == nil {
log.Info(
"Scale target not found. If this is unexpected, ensure that there is exactly one repository-wide or organizational runner deployment that matches this webhook event",
)
msg := "no horizontalrunnerautoscaler to scale for this github event"
ok = true
w.WriteHeader(http.StatusOK)
if written, err := w.Write([]byte(msg)); err != nil {
log.Error(err, "failed writing http response", "msg", msg, "written", written)
}
return
}
if err := autoscaler.tryScale(context.TODO(), target); err != nil {
log.Error(err, "could not scale up")
return
}
ok = true
w.WriteHeader(http.StatusOK)
msg := fmt.Sprintf("scaled %s by %d", target.Name, target.Amount)
autoscaler.Log.Info(msg)
if written, err := w.Write([]byte(msg)); err != nil {
log.Error(err, "failed writing http response", "msg", msg, "written", written)
}
}
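// Illustrative sketch, not part of the original source: one way to exercise
// this endpoint with a signed test delivery. GitHub signs the raw request body
// with HMAC-SHA256 using the webhook secret and sends the hex digest in the
// X-Hub-Signature-256 header; the event name travels in X-GitHub-Event. The
// endpoint URL, secret, and delivery ID below are placeholders, and the extra
// imports (bytes, crypto/hmac, crypto/sha256, encoding/hex) are assumed.
func sendTestPing(endpoint, secret string) (*http.Response, error) {
	body := []byte(`{"zen":"Keep it logically awesome."}`)
	mac := hmac.New(sha256.New, []byte(secret))
	mac.Write(body)
	req, err := http.NewRequest(http.MethodPost, endpoint, bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-GitHub-Event", "ping")
	req.Header.Set("X-GitHub-Delivery", "00000000-0000-0000-0000-000000000000")
	req.Header.Set("X-Hub-Signature-256", "sha256="+hex.EncodeToString(mac.Sum(nil)))
	return http.DefaultClient.Do(req)
}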
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) findHRAsByKey(ctx context.Context, value string) ([]v1alpha1.HorizontalRunnerAutoscaler, error) {
ns := autoscaler.Namespace
var defaultListOpts []client.ListOption
if ns != "" {
defaultListOpts = append(defaultListOpts, client.InNamespace(ns))
}
var hras []v1alpha1.HorizontalRunnerAutoscaler
if value != "" {
// defaultListOpts already scopes the list to autoscaler.Namespace when it is set,
// so only the field selector needs to be added here.
opts := append([]client.ListOption{}, defaultListOpts...)
opts = append(opts, client.MatchingFields{scaleTargetKey: value})
var hraList v1alpha1.HorizontalRunnerAutoscalerList
if err := autoscaler.List(ctx, &hraList, opts...); err != nil {
return nil, err
}
for _, d := range hraList.Items {
hras = append(hras, d)
}
}
return hras, nil
}
func matchTriggerConditionAgainstEvent(types []string, eventAction *string) bool {
if len(types) == 0 {
return true
}
if eventAction == nil {
return false
}
for _, tpe := range types {
if tpe == *eventAction {
return true
}
}
return false
}
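// Illustrative usage, not in the original source: the matcher treats an empty
// Types list as "match any action", a nil event action as "match nothing",
// and otherwise requires an exact string match:
//
//	queued := "queued"
//	matchTriggerConditionAgainstEvent(nil, &queued)                // true: no filter configured
//	matchTriggerConditionAgainstEvent([]string{"opened"}, nil)     // false: event carries no action
//	matchTriggerConditionAgainstEvent([]string{"queued"}, &queued) // true: exact match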
type ScaleTarget struct {
v1alpha1.HorizontalRunnerAutoscaler
v1alpha1.ScaleUpTrigger
}
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) searchScaleTargets(hras []v1alpha1.HorizontalRunnerAutoscaler, f func(v1alpha1.ScaleUpTrigger) bool) []ScaleTarget {
var matched []ScaleTarget
for _, hra := range hras {
if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
continue
}
for _, scaleUpTrigger := range hra.Spec.ScaleUpTriggers {
if !f(scaleUpTrigger) {
continue
}
matched = append(matched, ScaleTarget{
HorizontalRunnerAutoscaler: hra,
ScaleUpTrigger: scaleUpTrigger,
})
}
}
return matched
}
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleTarget(ctx context.Context, name string, f func(v1alpha1.ScaleUpTrigger) bool) (*ScaleTarget, error) {
hras, err := autoscaler.findHRAsByKey(ctx, name)
if err != nil {
return nil, err
}
autoscaler.Log.V(1).Info(fmt.Sprintf("Found %d HRAs by key", len(hras)), "key", name)
targets := autoscaler.searchScaleTargets(hras, f)
n := len(targets)
if n == 0 {
return nil, nil
}
if n > 1 {
var scaleTargetIDs []string
for _, t := range targets {
scaleTargetIDs = append(scaleTargetIDs, t.HorizontalRunnerAutoscaler.Name)
}
autoscaler.Log.Info(
"Found too many scale targets: "+
"It must be exactly one to avoid ambiguity. "+
"Either set Namespace for the webhook-based autoscaler to let it only find HRAs in the namespace, "+
"or update Repository or Organization fields in your RunnerDeployment resources to fix the ambiguity.",
"scaleTargets", strings.Join(scaleTargetIDs, ","))
return nil, nil
}
return &targets[0], nil
}
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleUpTarget(ctx context.Context, log logr.Logger, repo, owner, ownerType string, f func(v1alpha1.ScaleUpTrigger) bool) (*ScaleTarget, error) {
repositoryRunnerKey := owner + "/" + repo
if target, err := autoscaler.getScaleTarget(ctx, repositoryRunnerKey, f); err != nil {
log.Info("finding repository-wide runner", "repository", repositoryRunnerKey)
return nil, err
} else if target != nil {
log.Info("scale up target is repository-wide runners", "repository", repo)
return target, nil
}
if ownerType == "User" {
log.V(1).Info("no repository runner found", "organization", owner)
return nil, nil
}
if target, err := autoscaler.getScaleTarget(ctx, owner, f); err != nil {
log.Info("finding organizational runner", "organization", owner)
return nil, err
} else if target != nil {
log.Info("scale up target is organizational runners", "organization", owner)
return target, nil
} else {
log.V(1).Info("no repository runner or organizational runner found",
"repository", repositoryRunnerKey,
"organization", owner,
)
}
return nil, nil
}
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getJobScaleUpTargetForRepoOrOrg(ctx context.Context, log logr.Logger, repo, owner, ownerType string, labels []string) (*ScaleTarget, error) {
repositoryRunnerKey := owner + "/" + repo
if target, err := autoscaler.getJobScaleTarget(ctx, repositoryRunnerKey, labels); err != nil {
log.Info("finding repository-wide runner", "repository", repositoryRunnerKey)
return nil, err
} else if target != nil {
log.Info("job scale up target is repository-wide runners", "repository", repo)
return target, nil
}
if ownerType == "User" {
log.V(1).Info("no repository runner found", "organization", owner)
return nil, nil
}
if target, err := autoscaler.getJobScaleTarget(ctx, owner, labels); err != nil {
log.Info("finding organizational runner", "organization", owner)
return nil, err
} else if target != nil {
log.Info("job scale up target is organizational runners", "organization", owner)
return target, nil
} else {
log.V(1).Info("no repository runner or organizational runner found",
"repository", repositoryRunnerKey,
"organization", owner,
)
}
return nil, nil
}
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getJobScaleTarget(ctx context.Context, name string, labels []string) (*ScaleTarget, error) {
hras, err := autoscaler.findHRAsByKey(ctx, name)
if err != nil {
return nil, err
}
autoscaler.Log.V(1).Info(fmt.Sprintf("Found %d HRAs by key", len(hras)), "key", name)
HRA:
for _, hra := range hras {
if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
continue
}
if len(hra.Spec.ScaleUpTriggers) > 1 {
autoscaler.Log.V(1).Info("Skipping this HRA as it has too many ScaleUpTriggers to be used in workflow_job based scaling", "hra", hra.Name)
continue
}
var duration metav1.Duration
if len(hra.Spec.ScaleUpTriggers) > 0 {
duration = hra.Spec.ScaleUpTriggers[0].Duration
}
if duration.Duration <= 0 {
// Release the reserved capacity after at least 10 minutes by default, so that
// a reservation does not linger forever if GitHub somehow stops sending us
// "completed" workflow_job events. GitHub usually sends them, but nothing is
// 100% guaranteed, e.g. when something goes wrong on GitHub's side.
// We should probably make this configurable via the custom resource in the future.
duration.Duration = 10 * time.Minute
}
switch hra.Spec.ScaleTargetRef.Kind {
case "RunnerSet":
var rs v1alpha1.RunnerSet
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
return nil, err
}
if len(labels) == 1 && labels[0] == "self-hosted" {
return &ScaleTarget{HorizontalRunnerAutoscaler: hra, ScaleUpTrigger: v1alpha1.ScaleUpTrigger{Duration: duration}}, nil
}
// Ensure that the RunnerSet-managed runners have all the labels requested by the workflow_job.
for _, l := range labels {
var matched bool
for _, l2 := range rs.Spec.Labels {
if l == l2 {
matched = true
break
}
}
if !matched {
continue HRA
}
}
return &ScaleTarget{HorizontalRunnerAutoscaler: hra, ScaleUpTrigger: v1alpha1.ScaleUpTrigger{Duration: duration}}, nil
case "RunnerDeployment", "":
var rd v1alpha1.RunnerDeployment
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
return nil, err
}
if len(labels) == 1 && labels[0] == "self-hosted" {
return &ScaleTarget{HorizontalRunnerAutoscaler: hra, ScaleUpTrigger: v1alpha1.ScaleUpTrigger{Duration: duration}}, nil
}
// Ensure that the RunnerDeployment-managed runners have all the labels requested by the workflow_job.
for _, l := range labels {
var matched bool
for _, l2 := range rd.Spec.Template.Labels {
if l == l2 {
matched = true
break
}
}
if !matched {
continue HRA
}
}
return &ScaleTarget{HorizontalRunnerAutoscaler: hra, ScaleUpTrigger: v1alpha1.ScaleUpTrigger{Duration: duration}}, nil
default:
return nil, fmt.Errorf("unsupported scaleTargetRef.kind: %v", hra.Spec.ScaleTargetRef.Kind)
}
}
return nil, nil
}
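// Illustrative sketch, not part of the original source: the label rule that
// getJobScaleTarget applies to both RunnerSets and RunnerDeployments, factored
// out as a standalone predicate. A job that asks only for "self-hosted"
// matches any target; otherwise every label requested by the workflow_job
// must appear among the runner's labels.
func runnerLabelsSatisfyJob(jobLabels, runnerLabels []string) bool {
	if len(jobLabels) == 1 && jobLabels[0] == "self-hosted" {
		return true
	}
	for _, want := range jobLabels {
		var found bool
		for _, have := range runnerLabels {
			if want == have {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}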
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) tryScale(ctx context.Context, target *ScaleTarget) error {
if target == nil {
return nil
}
copy := target.HorizontalRunnerAutoscaler.DeepCopy()
amount := 1
if target.ScaleUpTrigger.Amount != 0 {
amount = target.ScaleUpTrigger.Amount
}
capacityReservations := getValidCapacityReservations(copy)
if amount > 0 {
copy.Spec.CapacityReservations = append(capacityReservations, v1alpha1.CapacityReservation{
ExpirationTime: metav1.Time{Time: time.Now().Add(target.ScaleUpTrigger.Duration.Duration)},
Replicas: amount,
})
} else if amount < 0 {
var reservations []v1alpha1.CapacityReservation
var found bool
for _, r := range capacityReservations {
if !found && r.Replicas+amount == 0 {
found = true
} else {
reservations = append(reservations, r)
}
}
copy.Spec.CapacityReservations = reservations
}
autoscaler.Log.Info(
"Patching hra for capacityReservations update",
"before", target.HorizontalRunnerAutoscaler.Spec.CapacityReservations,
"after", copy.Spec.CapacityReservations,
)
if err := autoscaler.Client.Patch(ctx, copy, client.MergeFrom(&target.HorizontalRunnerAutoscaler)); err != nil {
return fmt.Errorf("patching horizontalrunnerautoscaler to add capacity reservation: %w", err)
}
return nil
}
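// Illustrative sketch, not part of the original source: the scale-down branch
// of tryScale in isolation. A negative amount cancels exactly one reservation
// whose Replicas value offsets it (e.g. amount=-1 removes the oldest
// reservation with Replicas=1); all other reservations are kept in order.
func cancelOneReservation(reservations []v1alpha1.CapacityReservation, amount int) []v1alpha1.CapacityReservation {
	var kept []v1alpha1.CapacityReservation
	var cancelled bool
	for _, r := range reservations {
		if !cancelled && r.Replicas+amount == 0 {
			cancelled = true
			continue
		}
		kept = append(kept, r)
	}
	return kept
}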
func getValidCapacityReservations(autoscaler *v1alpha1.HorizontalRunnerAutoscaler) []v1alpha1.CapacityReservation {
var capacityReservations []v1alpha1.CapacityReservation
now := time.Now()
for _, reservation := range autoscaler.Spec.CapacityReservations {
if reservation.ExpirationTime.Time.After(now) {
capacityReservations = append(capacityReservations, reservation)
}
}
return capacityReservations
}
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) SetupWithManager(mgr ctrl.Manager) error {
name := "webhookbasedautoscaler"
if autoscaler.Name != "" {
name = autoscaler.Name
}
autoscaler.Recorder = mgr.GetEventRecorderFor(name)
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &v1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, func(rawObj client.Object) []string {
hra := rawObj.(*v1alpha1.HorizontalRunnerAutoscaler)
if hra.Spec.ScaleTargetRef.Name == "" {
return nil
}
switch hra.Spec.ScaleTargetRef.Kind {
case "", "RunnerDeployment":
var rd v1alpha1.RunnerDeployment
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
return nil
}
return []string{rd.Spec.Template.Spec.Repository, rd.Spec.Template.Spec.Organization}
case "RunnerSet":
var rs v1alpha1.RunnerSet
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
return nil
}
return []string{rs.Spec.Repository, rs.Spec.Organization}
}
return nil
}); err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.HorizontalRunnerAutoscaler{}).
Named(name).
Complete(autoscaler)
}
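// Illustrative usage, not in the original source: the field index registered
// above is what findHRAsByKey queries. Because the indexer returns both the
// scale target's repository ("owner/repo") and its organization ("owner"),
// an HRA can be looked up by either key (c is any client.Client backed by the
// same manager's cache; "myorg/myrepo" is a placeholder):
//
//	var list v1alpha1.HorizontalRunnerAutoscalerList
//	_ = c.List(ctx, &list, client.MatchingFields{scaleTargetKey: "myorg/myrepo"})
//	_ = c.List(ctx, &list, client.MatchingFields{scaleTargetKey: "myorg"})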

View File

@@ -1,53 +0,0 @@
package controllers
import (
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
"github.com/actions-runner-controller/actions-runner-controller/pkg/actionsglob"
"github.com/google/go-github/v37/github"
)
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchCheckRunEvent(event *github.CheckRunEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
return func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
g := scaleUpTrigger.GitHubEvent
if g == nil {
return false
}
cr := g.CheckRun
if cr == nil {
return false
}
if !matchTriggerConditionAgainstEvent(cr.Types, event.Action) {
return false
}
if cr.Status != "" && (event.CheckRun == nil || event.CheckRun.Status == nil || *event.CheckRun.Status != cr.Status) {
return false
}
if checkRun := event.CheckRun; checkRun != nil && len(cr.Names) > 0 {
for _, pat := range cr.Names {
if r := actionsglob.Match(pat, checkRun.GetName()); r {
return true
}
}
return false
}
if len(scaleUpTrigger.GitHubEvent.CheckRun.Repositories) > 0 {
for _, repository := range scaleUpTrigger.GitHubEvent.CheckRun.Repositories {
if repository == *event.Repo.Name {
return true
}
}
return false
}
return true
}
}

View File

@@ -1,32 +0,0 @@
package controllers
import (
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
"github.com/google/go-github/v37/github"
)
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPullRequestEvent(event *github.PullRequestEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
return func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
g := scaleUpTrigger.GitHubEvent
if g == nil {
return false
}
pr := g.PullRequest
if pr == nil {
return false
}
if !matchTriggerConditionAgainstEvent(pr.Types, event.Action) {
return false
}
if !matchTriggerConditionAgainstEvent(pr.Branches, event.PullRequest.Base.Ref) {
return false
}
return true
}
}

View File

@@ -1,24 +0,0 @@
package controllers
import (
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
"github.com/google/go-github/v37/github"
)
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPushEvent(event *github.PushEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
return func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
g := scaleUpTrigger.GitHubEvent
if g == nil {
return false
}
push := g.Push
if push == nil {
return false
}
return true
}
}
