Mirror of https://github.com/actions/actions-runner-controller.git, synced 2025-12-10 19:50:30 +00:00.
Compare commits
167 Commits
| SHA1 |
|---|
| 332548093a |
| b4e143dadc |
| 93c4dd856e |
| 93aea48c38 |
| 14b17cca73 |
| 5298c6ea29 |
| 8fa08d59b1 |
| 003c552c34 |
| 83370d7f95 |
| 7da9d0ae19 |
| b56fa6a748 |
| a22ee8a5f1 |
| e1762ba746 |
| 710e2fbc3a |
| 433552770e |
| ba2a32eef6 |
| de0d7ad78c |
| 0382f3bbd5 |
| b6640a033c |
| 998c028d90 |
| f8dffab19d |
| 7ff5b7da8c |
| 6aaff4ecee |
| 437d0173b0 |
| a389292478 |
| 6eadb03669 |
| aa60021ab0 |
| 50e26bd2f6 |
| 2dd13b4a19 |
| 35af24cf03 |
| ca96b66fbe |
| 4db5fbc7a1 |
| add83bc7bc |
| 666bba784c |
| 0672ff0ff9 |
| f2f22827a6 |
| a3a801757d |
| 0c003f20d4 |
| 863760828a |
| 517fae4119 |
| d9e1e64dc6 |
| 3c4ab2d479 |
| 3ca96557a6 |
| 5fd6ec4bc8 |
| 6863bdb208 |
| f3fcb428ae |
| 41bae32a9f |
| e4879e7ae4 |
| e5bb130fda |
| e7a21cfc53 |
| 8f54644b08 |
| c56d6a6c85 |
| a96c3e1102 |
| d29de8d454 |
| 12c4d96250 |
| 46a13c0626 |
| e32a8054d0 |
| 0deb6809b9 |
| 21af1ec19d |
| dfadb86d66 |
| c91e76f169 |
| 718232f8f4 |
| c7f5f7d161 |
| 33bb6902bc |
| aeb0601147 |
| 991c0b3211 |
| 71da6d5271 |
| e4fd4bc99c |
| d9a8dc7e84 |
| 795cf8b1de |
| 0615c2adb1 |
| a918e56ece |
| 546b5251ed |
| 74dda4ea1b |
| b29816290a |
| 921daff61b |
| e233f7ad6a |
| 623c84fa52 |
| d4fb6204cb |
| f8e07c7fe4 |
| f73713859c |
| e0a7be253e |
| 915739b972 |
| 4925880e5e |
| c143fd50b5 |
| dbd668ae2d |
| 5c1be3265b |
| ebcd838501 |
| 6ef276b239 |
| f70f325f48 |
| f7c336f9dd |
| ae380f5987 |
| 4bf1c12a98 |
| cb561d8db4 |
| eaf6d2f2e2 |
| 5ae7ce16e0 |
| bdcde44642 |
| 5116e3800e |
| 4e107a4e50 |
| 93238697d9 |
| 48f62b4c89 |
| ea94b3cc5b |
| 0cac005ab2 |
| 55ca7bfdf5 |
| ca97f39fcb |
| f0c8c07428 |
| e54edea918 |
| e58f82bfce |
| 244e0dd987 |
| 02009cef17 |
| 2b5af62184 |
| ec58ad19e0 |
| cc9fe33ef5 |
| 4a5a85fd61 |
| 56b26fd751 |
| 36e95dad47 |
| 3724b46033 |
| 538e2783d7 |
| 72ca998266 |
| d439ed5c81 |
| 58c2bdf2bb |
| fe9164b025 |
| 06141b39b4 |
| ac4c3fd365 |
| dc29e31bcc |
| 784019f3d7 |
| fc55477c1c |
| 3f78f71137 |
| e511401e51 |
| 37aa1a0b8c |
| bea0775bec |
| 79a494b2aa |
| 97404144eb |
| b77489d098 |
| 4152afbd30 |
| 29f621e1c8 |
| 5651ba6ead |
| 759cc4b47f |
| 4ede0c18d0 |
| 9091d9b756 |
| a09c2564d9 |
| a555c90fd5 |
| 38644cf4e8 |
| 23f357db10 |
| 584745b67d |
| df9592dc99 |
| 8071ac7066 |
| 3c33eca501 |
| aa827474b2 |
| c75c9f9226 |
| c09a04ec01 |
| 618276e3d3 |
| 18dd89c884 |
| 98b17dc0a5 |
| c658dcfa6d |
| c4996d4bbd |
| 7a3fa4f362 |
| 1bfd743e69 |
| 734f3bd63a |
| 409dc4c114 |
| 4b9a6c6700 |
| 86e1a4a8f3 |
| 544d620bc3 |
| 1cfe1974c4 |
| 7e4b6ebd6d |
| 11cb9b7882 |
| 10b88bf070 |
.github/ISSUE_TEMPLATE/bug_report.yml (28 changed lines)

````diff
@@ -1,8 +1,18 @@
 name: Bug Report
 description: File a bug report
-title: "Bug"
+title: "<Please write what didn't work for you here>"
 labels: ["bug"]
 body:
+  - type: checkboxes
+    id: read-troubleshooting-guide
+    attributes:
+      label: Checks
+      description: Please check all the boxes below before submitting
+      options:
+        - label: I've already read https://github.com/actions-runner-controller/actions-runner-controller/blob/master/TROUBLESHOOTING.md and I'm sure my issue is not covered in the troubleshooting guide.
+          required: true
+        - label: I'm not using a custom entrypoint in my runner image
+          required: true
   - type: input
     id: controller-version
     attributes:
@@ -50,7 +60,7 @@ body:
     id: checks
     attributes:
       label: Checks
-      description: Please check the boxes below before submitting
+      description: Please check all the boxes below before submitting
       options:
         - label: This isn't a question or user support case (For Q&A and community support, go to [Discussions](https://github.com/actions-runner-controller/actions-runner-controller/discussions). It might also be a good idea to contract with any of contributors and maintainers if your business is so critical and therefore you need priority support
           required: true
@@ -60,6 +70,8 @@ body:
           required: true
         - label: I've already upgraded ARC (including the CRDs, see charts/actions-runner-controller/docs/UPGRADING.md for details) to the latest and it didn't fix the issue
           required: true
+        - label: I've migrated to the workflow job webhook event (if you using webhook driven scaling)
+          required: true
   - type: textarea
     id: resource-definitions
     attributes:
@@ -129,8 +141,8 @@ body:
   - type: textarea
     id: controller-logs
     attributes:
-      label: Controller Logs
-      description: "NEVER EVER OMIT THIS! Include logs from `actions-runner-controller`'s controller-manager pod"
+      label: Whole Controller Logs
+      description: "NEVER EVER OMIT THIS! Include logs from `actions-runner-controller`'s controller-manager pod. Don't omit the parts you think irrelevant!"
       render: shell
       placeholder: |
        PROVIDE THE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA
@@ -149,11 +161,11 @@ body:
   - type: textarea
     id: runner-pod-logs
     attributes:
-      label: Runner Pod Logs
-      description: "Include logs from runner pod(s)"
+      label: Whole Runner Pod Logs
+      description: "Include logs from runner pod(s). Please don't omit the parts you think irrelevant!"
       render: shell
       placeholder: |
-       PROVIDE THE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA
+       PROVIDE THE WHOLE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA

        To grab the runner pod logs:

@@ -165,6 +177,8 @@ body:

        kubectl -n $NS logs $POD_NAME -c runner > runnerpod_runner.log
        kubectl -n $NS logs $POD_NAME -c docker > runnerpod_docker.log

+       If any of the containers are getting terminated immediately, try adding `--previous` to the kubectl-logs command to obtain logs emitted before the termination.
    validations:
      required: true
  - type: textarea
````
.github/ISSUE_TEMPLATE/config.yml (3 changed lines)

````diff
@@ -1,5 +1,4 @@
-# Blank issues are mainly for maintainers who are known to write complete issue descriptions without need to following a form
-blank_issues_enabled: true
+blank_issues_enabled: false
 contact_links:
   - name: Sponsor ARC Maintainers
     about: If your business relies on the continued maintainance of actions-runner-controller, please consider sponsoring the project and the maintainers.
````
.github/ISSUE_TEMPLATE/feature_request.md (20 changed lines)

````diff
@@ -1,19 +1,21 @@
 ---
 name: Feature request
 about: Suggest an idea for this project
+labels: enhancement
 title: ''
 assignees: ''

 ---

-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+### What would you like added?

-**Describe the solution you'd like**
-A clear and concise description of what you want to happen.
+*A clear and concise description of what you want to happen.*

-**Describe alternatives you've considered**
-A clear and concise description of any alternative solutions or features you've considered.
+Note: Feature requests to integrate vendor specific cloud tools (e.g. `awscli`, `gcloud-sdk`, `azure-cli`) will likely be rejected as the Runner image aims to be vendor agnostic.

-**Additional context**
-Add any other context or screenshots about the feature request here.
+### Why is this needed?
+
+*A clear and concise description of any alternative solutions or features you've considered.*
+
+### Additional context
+
+*Add any other context or screenshots about the feature request here.*
````
.github/renovate.json5 (3 changed lines)

````diff
@@ -31,7 +31,8 @@
   {
     "fileMatch": [
       "runner/actions-runner.dockerfile",
-      "runner/actions-runner-dind.dockerfile"
+      "runner/actions-runner-dind.dockerfile",
+      "runner/actions-runner-dind-rootless.dockerfile"
     ],
     "matchStrings": ["RUNNER_VERSION=+(?<currentValue>.*?)\\n"],
     "depNameTemplate": "actions/runner",
````
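For context, that `matchStrings` pattern is how Renovate locates the runner version pinned inside each of the listed dockerfiles. A minimal sketch of what it captures, written in Go for illustration only: Go's RE2 spells named groups `(?P<name>...)` rather than `(?<name>...)`, and the sample input is an assumed `ARG` line modeled on those dockerfiles, not copied from them.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Renovate's matchStrings pattern, adapted to RE2 named-group syntax.
	re := regexp.MustCompile(`RUNNER_VERSION=+(?P<currentValue>.*?)\n`)

	// Hypothetical sample line in the style of runner/actions-runner.dockerfile.
	m := re.FindStringSubmatch("ARG RUNNER_VERSION=2.298.2\n")
	if m != nil {
		// Prints the captured version: 2.298.2
		fmt.Println(m[re.SubexpIndex("currentValue")])
	}
}
```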
.github/workflows/golangci-lint.yaml (new file, 23 lines)

````diff
@@ -0,0 +1,23 @@
+name: golangci-lint
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+permissions:
+  contents: read
+  pull-requests: read
+jobs:
+  golangci:
+    name: lint
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: 1.19
+      - uses: actions/checkout@v3
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v3
+        with:
+          only-new-issues: true
+          version: v1.49.0
````
.github/workflows/publish-arc.yaml (1 changed line)

````diff
@@ -58,6 +58,7 @@ jobs:
         with:
           file: Dockerfile
           platforms: linux/amd64,linux/arm64
+          build-args: VERSION=${{ env.VERSION }}
           push: true
           tags: |
             ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:latest
````
.github/workflows/publish-canary.yaml (7 changed lines)

````diff
@@ -22,11 +22,11 @@ on:
 # https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
 permissions:
   contents: read
   packages: write

 jobs:
   canary-build:
     name: Build and Publish Canary Image
     runs-on: ubuntu-latest
     env:
       DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
@@ -50,9 +50,10 @@ jobs:
         with:
           file: Dockerfile
           platforms: linux/amd64,linux/arm64
+          build-args: VERSION=canary-${{ github.sha }}
           push: true
           tags: |
             ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:canary
-            ghcr.io/actions-runner-controller/actions-runner-controller:canary
+            ghcr.io/${{ github.repository }}:canary
           cache-from: type=gha,scope=arc-canary
           cache-to: type=gha,mode=max,scope=arc-canary
````
.github/workflows/publish-chart.yaml (8 changed lines)

````diff
@@ -31,7 +31,7 @@ jobs:
           fetch-depth: 0

       - name: Set up Helm
-        uses: azure/setup-helm@v3.0
+        uses: azure/setup-helm@v3.3
         with:
           version: ${{ env.HELM_VERSION }}

@@ -57,7 +57,7 @@ jobs:
           python-version: '3.7'

       - name: Set up chart-testing
-        uses: helm/chart-testing-action@v2.2.1
+        uses: helm/chart-testing-action@v2.3.1

       - name: Run chart-testing (list-changed)
         id: list-changed
@@ -73,7 +73,7 @@ jobs:

       - name: Create kind cluster
         if: steps.list-changed.outputs.changed == 'true'
-        uses: helm/kind-action@v1.3.0
+        uses: helm/kind-action@v1.4.0

       # We need cert-manager already installed in the cluster because we assume the CRDs exist
       - name: Install cert-manager
@@ -121,7 +121,7 @@ jobs:
           git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

       - name: Run chart-releaser
-        uses: helm/chart-releaser-action@v1.4.0
+        uses: helm/chart-releaser-action@v1.4.1
         env:
           CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
````
.github/workflows/run-first-interaction.yaml (new file, 29 lines)

````diff
@@ -0,0 +1,29 @@
+name: first-interaction
+
+on:
+  issues:
+    types: [opened]
+  pull_request:
+    branches: [master]
+    types: [opened]
+
+jobs:
+  check_for_first_interaction:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/first-interaction@main
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          issue-message: |
+            Hello! Thank you for filing an issue.
+
+            The maintainers will triage your issue shortly.
+
+            In the meantime, please take a look at the [troubleshooting guide](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/TROUBLESHOOTING.md) for bug reports.
+
+            If this is a feature request, please review our [contribution guidelines](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/CONTRIBUTING.md).
+          pr-message: |
+            Hello! Thank you for your contribution.
+
+            Please review our [contribution guidelines](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/CONTRIBUTING.md) to understand the project's testing and code conventions.
````
.github/workflows/run-stale.yaml (2 changed lines)

````diff
@@ -14,7 +14,7 @@ jobs:
       issues: write # for actions/stale to close stale issues
       pull-requests: write # for actions/stale to close stale PRs
     steps:
-      - uses: actions/stale@v5
+      - uses: actions/stale@v6
         with:
           stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
           # turn off stale for both issues and PRs
````
.github/workflows/runners.yaml (5 changed lines)

````diff
@@ -25,7 +25,7 @@ on:
       - '!**.md'

 env:
-  RUNNER_VERSION: 2.294.0
+  RUNNER_VERSION: 2.298.2
   DOCKER_VERSION: 20.10.12
   RUNNER_CONTAINER_HOOKS_VERSION: 0.1.2
   DOCKERHUB_USERNAME: summerwind
@@ -47,6 +47,9 @@ jobs:
           - name: actions-runner-dind
             os-name: ubuntu
             os-version: 20.04
+          - name: actions-runner-dind-rootless
+            os-name: ubuntu
+            os-version: 20.04

     steps:
       - name: Checkout
````
.github/workflows/validate-chart.yaml (6 changed lines)

````diff
@@ -26,7 +26,7 @@ jobs:
           fetch-depth: 0

       - name: Set up Helm
-        uses: azure/setup-helm@v3.0
+        uses: azure/setup-helm@v3.3
         with:
           version: ${{ env.HELM_VERSION }}

@@ -52,7 +52,7 @@ jobs:
           python-version: '3.7'

       - name: Set up chart-testing
-        uses: helm/chart-testing-action@v2.2.1
+        uses: helm/chart-testing-action@v2.3.1

       - name: Run chart-testing (list-changed)
         id: list-changed
@@ -67,7 +67,7 @@ jobs:
           ct lint --config charts/.ci/ct-config.yaml

       - name: Create kind cluster
-        uses: helm/kind-action@v1.3.0
+        uses: helm/kind-action@v1.4.0
         if: steps.list-changed.outputs.changed == 'true'

       # We need cert-manager already installed in the cluster because we assume the CRDs exist
````
.github/workflows/validate-runners.yaml (20 changed lines)

````diff
@@ -13,6 +13,26 @@ permissions:
   contents: read

 jobs:
+  shellcheck:
+    name: runner / shellcheck
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v1
+      - name: shellcheck
+        uses: reviewdog/action-shellcheck@v1
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          path: "./runner"
+          pattern: |
+            *.sh
+            *.bash
+            update-status
+          # Make this consistent with `make shellsheck`
+          shellcheck_flags: "--shell bash --source-path runner"
+          exclude: "./.git/*"
+          check_all_files_with_shebangs: "false"
+          # Set this to "true" once we addressed all the shellcheck findings
+          fail_on_error: "false"
   test-runner-entrypoint:
     name: Test entrypoint
     runs-on: ubuntu-latest
````
.golangci.yaml (new file, 17 lines)

````diff
@@ -0,0 +1,17 @@
+run:
+  timeout: 3m
+output:
+  format: github-actions
+linters-settings:
+  errcheck:
+    exclude-functions:
+      - (net/http.ResponseWriter).Write
+      - (*net/http.Server).Shutdown
+      - (*github.com/actions-runner-controller/actions-runner-controller/simulator.VisibleRunnerGroups).Add
+      - (*github.com/actions-runner-controller/actions-runner-controller/testing.Kind).Stop
+issues:
+  exclude-rules:
+    - path: controllers/suite_test.go
+      linters:
+        - staticcheck
+      text: "SA1019"
````
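The `errcheck` exclusions above cover methods whose returned errors callers conventionally ignore. A hedged illustration of the pattern the `(net/http.ResponseWriter).Write` entry permits; the handler below is hypothetical, not taken from the repository.

```go
package main

import "net/http"

// healthz is a hypothetical handler: writing the response body returns an
// error that HTTP handlers almost never act on, which is exactly the call
// the errcheck exclude-functions entry above exempts from linting.
func healthz(w http.ResponseWriter, _ *http.Request) {
	w.Write([]byte("ok")) // unchecked error: permitted by the exclusion
}

func main() {
	http.HandleFunc("/healthz", healthz)
	_ = http.ListenAndServe(":8080", nil)
}
```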
CONTRIBUTING.md (212 changed lines)

````diff
@@ -1,85 +1,53 @@
-## Contributing
+# Contribution Guide

-### Testing Controller Built from a Pull Request
+- [Contribution Guide](#contribution-guide)
+  - [Welcome](#welcome)
+  - [Before contributing code](#before-contributing-code)
+  - [How to Contribute a Patch](#how-to-contribute-a-patch)
+    - [Developing the Controller](#developing-the-controller)
+    - [Developing the Runners](#developing-the-runners)
+      - [Tests](#tests)
+      - [Running Ginkgo Tests](#running-ginkgo-tests)
+    - [Running End to End Tests](#running-end-to-end-tests)
+      - [Rerunning a failed test](#rerunning-a-failed-test)
+      - [Testing in a non-kind cluster](#testing-in-a-non-kind-cluster)
+    - [Code conventions](#code-conventions)
+    - [Opening the Pull Request](#opening-the-pull-request)
+  - [Helm Version Changes](#helm-version-changes)
+  - [Testing Controller Built from a Pull Request](#testing-controller-built-from-a-pull-request)

-We always appreciate your help in testing open pull requests by deploying custom builds of actions-runner-controller onto your own environment, so that we are extra sure we didn't break anything.
+## Welcome

-It is especially true when the pull request is about GitHub Enterprise, both GHEC and GHES, as [maintainers don't have GitHub Enterprise environments for testing](/README.md#github-enterprise-support).
+This document is the single source of truth for how to contribute to the code base.
+Feel free to browse the [open issues](https://github.com/actions-runner-controller/actions-runner-controller/issues) or file a new one, all feedback is welcome!
+By reading this guide, we hope to give you all of the information you need to be able to pick up issues, contribute new features, and get your work
+reviewed and merged.

-The process would look like the below:
+## Before contributing code

-- Clone this repository locally
-- Checkout the branch. If you use the `gh` command, run `gh pr checkout $PR_NUMBER`
-- Run `NAME=$DOCKER_USER/actions-runner-controller VERSION=canary make docker-build docker-push` for a custom container image build
-- Update your actions-runner-controller's controller-manager deployment to use the new image, `$DOCKER_USER/actions-runner-controller:canary`
+We welcome code patches, but to make sure things are well coordinated you should discuss any significant change before starting the work.
+The maintainers ask that you signal your intention to contribute to the project using the issue tracker.
+If there is an existing issue that you want to work on, please let us know so we can get it assigned to you.
+If you noticed a bug or want to add a new feature, there are issue templates you can fill out.

-Please also note that you need to replace `$DOCKER_USER` with your own DockerHub account name.
+When filing a feature request, the maintainers will review the change and give you a decision on whether we are willing to accept the feature into the project.
+For significantly large and/or complex features, we may request that you write up an architectural decision record ([ADR](https://github.blog/2020-08-13-why-write-adrs/)) detailing the change.
+Please use the [template](/adrs/0000-TEMPLATE.md) as guidance.

-### How to Contribute a Patch
+<!--
+TODO: Add a pre-requisite section describing what developers should
+install in order get started on ARC.
+-->

-Depending on what you are patching depends on how you should go about it. Below are some guides on how to test patches locally as well as develop the controller and runners.
+## How to Contribute a Patch

-When submitting a PR for a change please provide evidence that your change works as we still need to work on improving the CI of the project. Some resources are provided for helping achieve this, see this guide for details.
+Depending on what you are patching depends on how you should go about it.
+Below are some guides on how to test patches locally as well as develop the controller and runners.

-#### Running an End to End Test
+When submitting a PR for a change please provide evidence that your change works as we still need to work on improving the CI of the project.
+Some resources are provided for helping achieve this, see this guide for details.

-> **Notes for Ubuntu 20.04+ users**
->
-> If you're using Ubuntu 20.04 or greater, you might have installed `docker` with `snap`.
->
-> If you want to stick with `snap`-provided `docker`, do not forget to set `TMPDIR` to
-> somewhere under `$HOME`.
-> Otherwise `kind load docker-image` fail while running `docker save`.
-> See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap for more information.
-
-To test your local changes against both PAT and App based authentication please run the `acceptance` make target with the authentication configuration details provided:
-
-```shell
-# This sets `VERSION` envvar to some appropriate value
-. hack/make-env.sh
-
-DOCKER_USER=*** \
-GITHUB_TOKEN=*** \
-APP_ID=*** \
-PRIVATE_KEY_FILE_PATH=path/to/pem/file \
-INSTALLATION_ID=*** \
-make acceptance
-```
-
-**Rerunning a failed test**
-
-When one of tests run by `make acceptance` failed, you'd probably like to rerun only the failed one.
-
-It can be done by `make acceptance/run` and by setting the combination of `ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm|kubectl` and `ACCEPTANCE_TEST_SECRET_TYPE=token|app` values that failed (note, you just need to set the corresponding authentication configuration in this circumstance)
-
-In the example below, we rerun the test for the combination `ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token` only:
-
-```shell
-DOCKER_USER=*** \
-GITHUB_TOKEN=*** \
-ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm
-ACCEPTANCE_TEST_SECRET_TYPE=token \
-make acceptance/run
-```
-
-**Testing in a non-kind cluster**
-
-If you prefer to test in a non-kind cluster, you can instead run:
-
-```shell
-KUBECONFIG=path/to/kubeconfig \
-DOCKER_USER=*** \
-GITHUB_TOKEN=*** \
-APP_ID=*** \
-PRIVATE_KEY_FILE_PATH=path/to/pem/file \
-INSTALLATION_ID=*** \
-ACCEPTANCE_TEST_SECRET_TYPE=token \
-make docker-build acceptance/setup \
-  acceptance/deploy \
-  acceptance/tests
-```
-
-#### Developing the Controller
+### Developing the Controller

 Rerunning the whole acceptance test suite from scratch on every little change to the controller, the runner, and the chart would be counter-productive.
@@ -119,13 +87,14 @@ NAME=$DOCKER_USER/actions-runner make \
 (kubectl get po -ojsonpath={.items[*].metadata.name} | xargs -n1 kubectl delete po)
 ```

-#### Developing the Runners
+### Developing the Runners

-**Tests**
+#### Tests

-A set of example pipelines (./acceptance/pipelines) are provided in this repository which you can use to validate your runners are working as expected. When raising a PR please run the relevant suites to prove your change hasn't broken anything.
+A set of example pipelines (./acceptance/pipelines) are provided in this repository which you can use to validate your runners are working as expected.
+When raising a PR please run the relevant suites to prove your change hasn't broken anything.

-**Running Ginkgo Tests**
+#### Running Ginkgo Tests

 You can run the integration test suite that is written in Ginkgo with:

@@ -135,7 +104,8 @@ make test-with-deps

 This will firstly install a few binaries required to setup the integration test environment and then runs `go test` to start the Ginkgo test.

-If you don't want to use `make`, like when you're running tests from your IDE, install required binaries to `/usr/local/kubebuilder/bin`. That's the directory in which controller-runtime's `envtest` framework locates the binaries.
+If you don't want to use `make`, like when you're running tests from your IDE, install required binaries to `/usr/local/kubebuilder/bin`.
+That's the directory in which controller-runtime's `envtest` framework locates the binaries.

 ```shell
 sudo mkdir -p /usr/local/kubebuilder/bin
@@ -152,6 +122,92 @@ GINKGO_FOCUS='[It] should create a new Runner resource from the specified templa
 go test -v -run TestAPIs github.com/actions-runner-controller/actions-runner-controller/controllers
 ```

-#### Helm Version Bumps
+### Running End to End Tests

-In general we ask you not to bump the version in your PR, the maintainers in general manage the publishing of a new chart.
+> **Notes for Ubuntu 20.04+ users**
+>
+> If you're using Ubuntu 20.04 or greater, you might have installed `docker` with `snap`.
+>
+> If you want to stick with `snap`-provided `docker`, do not forget to set `TMPDIR` to somewhere under `$HOME`.
+> Otherwise `kind load docker-image` fail while running `docker save`.
+> See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap for more information.
+
+To test your local changes against both PAT and App based authentication please run the `acceptance` make target with the authentication configuration details provided:
+
+```shell
+# This sets `VERSION` envvar to some appropriate value
+. hack/make-env.sh
+
+DOCKER_USER=*** \
+GITHUB_TOKEN=*** \
+APP_ID=*** \
+PRIVATE_KEY_FILE_PATH=path/to/pem/file \
+INSTALLATION_ID=*** \
+make acceptance
+```
+
+#### Rerunning a failed test
+
+When one of tests run by `make acceptance` failed, you'd probably like to rerun only the failed one.
+
+It can be done by `make acceptance/run` and by setting the combination of `ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm|kubectl` and `ACCEPTANCE_TEST_SECRET_TYPE=token|app` values that failed (note, you just need to set the corresponding authentication configuration in this circumstance)
+
+In the example below, we rerun the test for the combination `ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token` only:
+
+```shell
+DOCKER_USER=*** \
+GITHUB_TOKEN=*** \
+ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm \
+ACCEPTANCE_TEST_SECRET_TYPE=token \
+make acceptance/run
+```
+
+#### Testing in a non-kind cluster
+
+If you prefer to test in a non-kind cluster, you can instead run:
+
+```shell
+KUBECONFIG=path/to/kubeconfig \
+DOCKER_USER=*** \
+GITHUB_TOKEN=*** \
+APP_ID=*** \
+PRIVATE_KEY_FILE_PATH=path/to/pem/file \
+INSTALLATION_ID=*** \
+ACCEPTANCE_TEST_SECRET_TYPE=token \
+make docker-build acceptance/setup \
+  acceptance/deploy \
+  acceptance/tests
+```
+
+### Code conventions
+
+Before shipping your PR, please check the following items to make sure CI passes.
+
+- Run `go mod tidy` if you made changes to dependencies.
+- Format the code using `gofmt`
+- Run the `golangci-lint` tool locally.
+  - We recommend you use `make lint` to run the tool using a Docker container matching the CI version.
+
+### Opening the Pull Request
+
+Send PR, add issue number to description
+
+## Helm Version Changes
+
+In general we ask you not to bump the version in your PR.
+The maintainers will manage releases and publishing new charts.
+
+## Testing Controller Built from a Pull Request
+
+We always appreciate your help in testing open pull requests by deploying custom builds of actions-runner-controller onto your own environment, so that we are extra sure we didn't break anything.
+
+It is especially true when the pull request is about GitHub Enterprise, both GHEC and GHES, as [maintainers don't have GitHub Enterprise environments for testing](docs/detailed-docs.md#github-enterprise-support).
+
+The process would look like the below:
+
+- Clone this repository locally
+- Checkout the branch. If you use the `gh` command, run `gh pr checkout $PR_NUMBER`
+- Run `NAME=$DOCKER_USER/actions-runner-controller VERSION=canary make docker-build docker-push` for a custom container image build
+- Update your actions-runner-controller's controller-manager deployment to use the new image, `$DOCKER_USER/actions-runner-controller:canary`
+
+Please also note that you need to replace `$DOCKER_USER` with your own DockerHub account name.
````
Dockerfile (13 changed lines)

````diff
@@ -1,11 +1,10 @@
 # Build the manager binary
-FROM --platform=$BUILDPLATFORM golang:1.18.3 as builder
+FROM --platform=$BUILDPLATFORM golang:1.19.2 as builder

 WORKDIR /workspace

 # Make it runnable on a distroless image/without libc
 ENV CGO_ENABLED=0

 # Copy the Go Modules manifests
 COPY go.mod go.sum ./

@@ -25,20 +24,20 @@ RUN go mod download
 # With the above commmand,
 # TARGETOS can be "linux", TARGETARCH can be "amd64", "arm64", and "arm", TARGETVARIANT can be "v7".

-ARG TARGETPLATFORM TARGETOS TARGETARCH TARGETVARIANT
+ARG TARGETPLATFORM TARGETOS TARGETARCH TARGETVARIANT VERSION=dev

 # We intentionally avoid `--mount=type=cache,mode=0777,target=/go/pkg/mod` in the `go mod download` and the `go build` runs
 # to avoid https://github.com/moby/buildkit/issues/2334
 # We can use docker layer cache so the build is fast enogh anyway
 # We also use per-platform GOCACHE for the same reason.
-env GOCACHE /build/${TARGETPLATFORM}/root/.cache/go-build
+ENV GOCACHE /build/${TARGETPLATFORM}/root/.cache/go-build

 # Build
 RUN --mount=target=. \
   --mount=type=cache,mode=0777,target=${GOCACHE} \
   export GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOARM=${TARGETVARIANT#v} && \
-  go build -o /out/manager main.go && \
-  go build -o /out/github-webhook-server ./cmd/githubwebhookserver
+  go build -trimpath -ldflags="-s -w -X 'github.com/actions-runner-controller/actions-runner-controller/build.Version=${VERSION}'" -o /out/manager main.go && \
+  go build -trimpath -ldflags="-s -w" -o /out/github-webhook-server ./cmd/githubwebhookserver

 # Use distroless as minimal base image to package the manager binary
 # Refer to https://github.com/GoogleContainerTools/distroless for more details
@@ -49,6 +48,6 @@ WORKDIR /
 COPY --from=builder /out/manager .
 COPY --from=builder /out/github-webhook-server .

-USER nonroot:nonroot
+USER 65532:65532

 ENTRYPOINT ["/manager"]
````
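The new `-X` linker flag stamps the release version into the manager binary at link time. A minimal sketch of what such a `build` package typically looks like; the package path matches the import path named in the flag, but the body below is an assumption for illustration, not the repository's actual file.

```go
// Package build is a sketch of the target of the -ldflags "-X" option above.
package build

// Version defaults to "dev" (matching the ARG default in the Dockerfile) and
// is overwritten at link time by:
//
//	go build -ldflags="-X 'github.com/actions-runner-controller/actions-runner-controller/build.Version=${VERSION}'"
var Version = "dev"
```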
Makefile (39 changed lines)

````diff
@@ -4,8 +4,8 @@ else
 NAME ?= summerwind/actions-runner-controller
 endif
 DOCKER_USER ?= $(shell echo ${NAME} | cut -d / -f1)
-VERSION ?= latest
-RUNNER_VERSION ?= 2.294.0
+VERSION ?= dev
+RUNNER_VERSION ?= 2.298.2
 TARGETPLATFORM ?= $(shell arch)
 RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
 RUNNER_TAG ?= ${VERSION}
@@ -19,6 +19,7 @@ KUBECONTEXT ?= kind-acceptance
 CLUSTER ?= acceptance
 CERT_MANAGER_VERSION ?= v1.1.1
 KUBE_RBAC_PROXY_VERSION ?= v0.11.0
+SHELLCHECK_VERSION ?= 0.8.0

 # Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
 CRD_OPTIONS ?= "crd:generateEmbeddedObjectMeta=true"
@@ -31,6 +32,7 @@ GOBIN=$(shell go env GOBIN)
 endif

 TEST_ASSETS=$(PWD)/test-assets
+TOOLS_PATH=$(PWD)/.tools

 # default list of platforms for which multiarch image is built
 ifeq (${PLATFORMS}, )
@@ -51,10 +53,13 @@ endif

 all: manager

+lint:
+	docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.49.0 golangci-lint run
+
 GO_TEST_ARGS ?= -short

 # Run tests
-test: generate fmt vet manifests
+test: generate fmt vet manifests shellcheck
 	go test $(GO_TEST_ARGS) ./... -coverprofile cover.out
 	go test -fuzz=Fuzz -fuzztime=10s -run=Fuzz* ./controllers

@@ -92,7 +97,7 @@ manifests: manifests-gen-crds chart-crds
 manifests-gen-crds: controller-gen yq
 	$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
 	for YAMLFILE in config/crd/bases/actions*.yaml; do \
-		$(YQ) write --inplace "$$YAMLFILE" spec.preserveUnknownFields false; \
+		$(YQ) '.spec.preserveUnknownFields = false' --inplace "$$YAMLFILE" ; \
 	done

 chart-crds:
@@ -110,6 +115,10 @@ vet:
 generate: controller-gen
 	$(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths="./..."

+# Run shellcheck on runner scripts
+shellcheck: shellcheck-install
+	$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.bash runner/*.sh
+
 docker-buildx:
 	export DOCKER_CLI_EXPERIMENTAL=enabled ;\
 	export DOCKER_BUILDKIT=1
@@ -119,6 +128,7 @@ docker-buildx:
 	docker buildx build --platform ${PLATFORMS} \
 		--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
 		--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
+		--build-arg VERSION=${VERSION} \
 		-t "${NAME}:${VERSION}" \
 		-f Dockerfile \
 		. ${PUSH_ARG}
@@ -242,12 +252,31 @@ ifeq (, $(wildcard $(GOBIN)/yq))
 	YQ_TMP_DIR=$$(mktemp -d) ;\
 	cd $$YQ_TMP_DIR ;\
 	go mod init tmp ;\
-	go install github.com/mikefarah/yq/v3@3.4.0 ;\
+	go install github.com/mikefarah/yq/v4@v4.25.3 ;\
 	rm -rf $$YQ_TMP_DIR ;\
 	}
 endif
 YQ=$(GOBIN)/yq

+# find or download shellcheck
+# download shellcheck if necessary
+shellcheck-install:
+ifeq (, $(wildcard $(TOOLS_PATH)/shellcheck))
+	echo "Downloading shellcheck"
+	@{ \
+	set -e ;\
+	SHELLCHECK_TMP_DIR=$$(mktemp -d) ;\
+	cd $$SHELLCHECK_TMP_DIR ;\
+	curl -LO https://github.com/koalaman/shellcheck/releases/download/v$(SHELLCHECK_VERSION)/shellcheck-v$(SHELLCHECK_VERSION).linux.x86_64.tar.xz ;\
+	tar Jxvf shellcheck-v$(SHELLCHECK_VERSION).linux.x86_64.tar.xz ;\
+	cd $(CURDIR) ;\
+	mkdir -p $(TOOLS_PATH) ;\
+	mv $$SHELLCHECK_TMP_DIR/shellcheck-v$(SHELLCHECK_VERSION)/shellcheck $(TOOLS_PATH)/ ;\
+	rm -rf $$SHELLCHECK_TMP_DIR ;\
+	}
+endif
+SHELLCHECK=$(TOOLS_PATH)/shellcheck

 OS_NAME := $(shell uname -s | tr A-Z a-z)

 # find or download etcd
````
````diff
@@ -167,7 +167,7 @@ are in a namespace not shared with anything else_

 **Problem**

-ARC isn't involved in jobs actually getting allocated to a runner. ARC is responsible for orchestrating runners and the runner lifecycle. Why some people see large delays in job allocation is not clear however it has been https://github.com/actions-runner-controller/actions-runner-controller/issues/1387#issuecomment-1122593984 that this is caused from the self-update process somehow.
+ARC isn't involved in jobs actually getting allocated to a runner. ARC is responsible for orchestrating runners and the runner lifecycle. Why some people see large delays in job allocation is not clear however it has been confirmed https://github.com/actions-runner-controller/actions-runner-controller/issues/1387#issuecomment-1122593984 that this is caused from the self-update process somehow.

 **Solution**

@@ -256,8 +256,28 @@ spec:
     env: []
 ```

-There may be more places you need to tweak for MTU.
-Please consult issues like #651 for more information.
+If the issue still persists, you can set the `ARC_DOCKER_MTU_PROPAGATION` to propagate the host MTU to networks created
+by the GitHub Runner. For instance:
+
+```yaml
+apiVersion: actions.summerwind.dev/v1alpha1
+kind: RunnerDeployment
+metadata:
+  name: github-runner
+  namespace: github-system
+spec:
+  replicas: 6
+  template:
+    spec:
+      dockerMTU: 1400
+      repository: $username/$repo
+      env:
+        - name: ARC_DOCKER_MTU_PROPAGATION
+          value: "true"
+```
+
+You can read the discussion regarding this issue in
+(#1406)[https://github.com/actions-runner-controller/actions-runner-controller/issues/1046].

 ## Unable to scale to zero with TotalNumberOfQueuedAndInProgressWorkflowRuns
````
````diff
@@ -41,8 +41,23 @@ TEST_ID=${TEST_ID:-default}

 if [ "${tool}" == "helm" ]; then
   set -v
+
+  CHART=${CHART:-charts/actions-runner-controller}
+
+  flags=()
+  if [ "${IMAGE_PULL_SECRET}" != "" ]; then
+    flags+=( --set imagePullSecrets[0].name=${IMAGE_PULL_SECRET})
+    flags+=( --set image.actionsRunnerImagePullSecrets[0].name=${IMAGE_PULL_SECRET})
+    flags+=( --set githubWebhookServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET})
+  fi
+  if [ "${CHART_VERSION}" != "" ]; then
+    flags+=( --version ${CHART_VERSION})
+  fi
+
+  set -vx
+
   helm upgrade --install actions-runner-controller \
-    charts/actions-runner-controller \
+    ${CHART} \
     -n actions-runner-system \
     --create-namespace \
     --set syncPeriod=${SYNC_PERIOD} \
@@ -51,9 +66,7 @@ if [ "${tool}" == "helm" ]; then
     --set image.tag=${VERSION} \
     --set podAnnotations.test-id=${TEST_ID} \
     --set githubWebhookServer.podAnnotations.test-id=${TEST_ID} \
-    --set imagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
-    --set image.actionsRunnerImagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
-    --set githubWebhookServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
+    ${flags[@]} --set image.imagePullPolicy=${IMAGE_PULL_POLICY} \
     -f ${VALUES_FILE}
   set +v
   # To prevent `CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: metadata.annotations: Too long: must have at most 262144 bytes`
````
````diff
@@ -6,6 +6,8 @@ OP=${OP:-apply}

 RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}

+cat acceptance/testdata/kubernetes_container_mode.envsubst.yaml | NAMESPACE=${RUNNER_NAMESPACE} envsubst | kubectl apply -f -
+
 if [ -n "${TEST_REPO}" ]; then
   if [ "${USE_RUNNERSET}" != "false" ]; then
     cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerset envsubst | kubectl ${OP} -f -
````
acceptance/testdata/kubernetes_container_mode.envsubst.yaml (new file, 86 lines)

````diff
@@ -0,0 +1,86 @@
+# USAGE:
+# cat acceptance/testdata/kubernetes_container_mode.envsubst.yaml | NAMESPACE=default envsubst | kubectl apply -f -
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: k8s-mode-runner
+rules:
+  - apiGroups: [""]
+    resources: ["pods"]
+    verbs: ["get", "list", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["pods/exec"]
+    verbs: ["get", "create"]
+  - apiGroups: [""]
+    resources: ["pods/log"]
+    verbs: ["get", "list", "watch",]
+  - apiGroups: ["batch"]
+    resources: ["jobs"]
+    verbs: ["get", "list", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list", "create", "delete"]
+  # Needed to report test success by crating a cm from within workflow job step
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["create", "delete"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: runner-status-updater
+rules:
+  - apiGroups: ["actions.summerwind.dev"]
+    resources: ["runners/status"]
+    verbs: ["get", "update", "patch"]
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: ${RUNNER_SERVICE_ACCOUNT_NAME}
+  namespace: ${NAMESPACE}
+---
+# To verify it's working, try:
+# kubectl auth can-i --as system:serviceaccount:default:runner get pod
+# If incomplete, workflows and jobs would fail with an error message like:
+# Error: Error: The Service account needs the following permissions [{"group":"","verbs":["get","list","create","delete"],"resource":"pods","subresource":""},{"group":"","verbs":["get","create"],"resource":"pods","subresource":"exec"},{"group":"","verbs":["get","list","watch"],"resource":"pods","subresource":"log"},{"group":"batch","verbs":["get","list","create","delete"],"resource":"jobs","subresource":""},{"group":"","verbs":["create","delete","get","list"],"resource":"secrets","subresource":""}] on the pod resource in the 'default' namespace. Please contact your self hosted runner administrator.
+# Error: Process completed with exit code 1.
+apiVersion: rbac.authorization.k8s.io/v1
+# This role binding allows "jane" to read pods in the "default" namespace.
+# You need to already have a Role named "pod-reader" in that namespace.
+kind: RoleBinding
+metadata:
+  name: runner-k8s-mode-runner
+  namespace: ${NAMESPACE}
+subjects:
+  - kind: ServiceAccount
+    name: ${RUNNER_SERVICE_ACCOUNT_NAME}
+    namespace: ${NAMESPACE}
+roleRef:
+  kind: ClusterRole
+  name: k8s-mode-runner
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: runner-runner-stat-supdater
+  namespace: ${NAMESPACE}
+subjects:
+  - kind: ServiceAccount
+    name: ${RUNNER_SERVICE_ACCOUNT_NAME}
+    namespace: ${NAMESPACE}
+roleRef:
+  kind: ClusterRole
+  name: runner-status-updater
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: org-runnerdeploy-runner-work-dir
+  labels:
+    content: org-runnerdeploy-runner-work-dir
+provisioner: rancher.io/local-path
+reclaimPolicy: Delete
+volumeBindingMode: WaitForFirstConsumer
````
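The `kubectl auth can-i` comment in that manifest is the quickest way to verify the bindings. For completeness, a hedged Go sketch of the same probe done programmatically with client-go's SelfSubjectAccessReview; it assumes it runs in-cluster under the runner service account, and the namespace and verb are illustrative.

```go
package main

import (
	"context"
	"fmt"

	authv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Ask the API server whether the current identity may create pods,
	// one of the verbs the k8s-mode-runner ClusterRole grants.
	review := &authv1.SelfSubjectAccessReview{
		Spec: authv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authv1.ResourceAttributes{
				Namespace: "default",
				Verb:      "create",
				Resource:  "pods",
			},
		},
	}
	resp, err := client.AuthorizationV1().SelfSubjectAccessReviews().Create(
		context.Background(), review, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("allowed:", resp.Status.Allowed)
}
```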
30
acceptance/testdata/runnerdeploy.envsubst.yaml
vendored
30
acceptance/testdata/runnerdeploy.envsubst.yaml
vendored
@@ -1,3 +1,13 @@
|
|||||||
|
apiVersion: storage.k8s.io/v1
|
||||||
|
kind: StorageClass
|
||||||
|
metadata:
|
||||||
|
name: ${NAME}-runner-work-dir
|
||||||
|
labels:
|
||||||
|
content: ${NAME}-runner-work-dir
|
||||||
|
provisioner: rancher.io/local-path
|
||||||
|
reclaimPolicy: Delete
|
||||||
|
volumeBindingMode: WaitForFirstConsumer
|
||||||
|
---
|
||||||
apiVersion: actions.summerwind.dev/v1alpha1
|
apiVersion: actions.summerwind.dev/v1alpha1
|
||||||
kind: RunnerDeployment
|
kind: RunnerDeployment
|
||||||
metadata:
|
metadata:
|
||||||
@@ -39,10 +49,30 @@ spec:
 labels:
 - "${RUNNER_LABEL}"
 
+env:
+- name: ROLLING_UPDATE_PHASE
+  value: "${ROLLING_UPDATE_PHASE}"
+- name: ARC_DOCKER_MTU_PROPAGATION
+  value: "true"
+
+dockerMTU: 1400
+
 #
 # Non-standard working directory
 #
 # workDir: "/"
+
+# # Uncomment the below to enable the kubernetes container mode
+# # See https://github.com/actions-runner-controller/actions-runner-controller#runner-with-k8s-jobs
+containerMode: ${RUNNER_CONTAINER_MODE}
+workVolumeClaimTemplate:
+  accessModes:
+  - ReadWriteOnce
+  storageClassName: "${NAME}-runner-work-dir"
+  resources:
+    requests:
+      storage: 10Gi
+serviceAccountName: ${RUNNER_SERVICE_ACCOUNT_NAME}
 ---
 apiVersion: actions.summerwind.dev/v1alpha1
 kind: HorizontalRunnerAutoscaler
27 acceptance/testdata/runnerset.envsubst.yaml (vendored)
@@ -112,6 +112,7 @@ spec:
       labels:
         app: ${NAME}
     spec:
+      serviceAccountName: ${RUNNER_SERVICE_ACCOUNT_NAME}
       containers:
       - name: runner
         imagePullPolicy: IfNotPresent
@@ -120,6 +121,8 @@ spec:
           value: "${RUNNER_FEATURE_FLAG_EPHEMERAL}"
         - name: GOMODCACHE
           value: "/home/runner/.cache/go-mod"
+        - name: ROLLING_UPDATE_PHASE
+          value: "${ROLLING_UPDATE_PHASE}"
         # PV-backed runner work dir
         volumeMounts:
         # Comment out the ephemeral work volume if you're going to test the kubernetes container mode
@@ -152,19 +155,19 @@ spec:
         # https://github.com/actions/setup-go/blob/56a61c9834b4a4950dbbf4740af0b8a98c73b768/src/installer.ts#L144
           mountPath: "/opt/hostedtoolcache"
       # Valid only when dockerdWithinRunnerContainer=false
-      - name: docker
-        # PV-backed runner work dir
-        volumeMounts:
-        - name: work
-          mountPath: /runner/_work
-        # Cache docker image layers, in case dockerdWithinRunnerContainer=false
-        - name: var-lib-docker
-          mountPath: /var/lib/docker
-        # image: mumoshu/actions-runner-dind:dev
+      # - name: docker
+      #   # PV-backed runner work dir
+      #   volumeMounts:
+      #   - name: work
+      #     mountPath: /runner/_work
+      #   # Cache docker image layers, in case dockerdWithinRunnerContainer=false
+      #   - name: var-lib-docker
+      #     mountPath: /var/lib/docker
+      #   # image: mumoshu/actions-runner-dind:dev
 
-        # For buildx cache
-        - name: cache
-          mountPath: "/home/runner/.cache"
+      #   # For buildx cache
+      #   - name: cache
+      #     mountPath: "/home/runner/.cache"
       # Comment out the ephemeral work volume if you're going to test the kubernetes container mode
       # volumes:
       # - name: work
@@ -1,13 +1,18 @@
 # Set actions-runner-controller settings for testing
 logLevel: "-4"
-imagePullSecrets:
-  - name:
+imagePullSecrets: []
 image:
-  actionsRunnerImagePullSecrets:
-    - name:
+  # This needs to be an empty array rather than a single-item array with empty name.
+  # Otherwise you end up with the following error on helm-upgrade:
+  # Error: UPGRADE FAILED: failed to create patch: map: map[] does not contain declared merge key: name && failed to create patch: map: map[] does not contain declared merge key: name
+  actionsRunnerImagePullSecrets: []
+runner:
+  statusUpdateHook:
+    enabled: true
+rbac:
+  allowGrantingKubernetesContainerModePermissions: true
 githubWebhookServer:
-  imagePullSecrets:
-    - name:
+  imagePullSecrets: []
   logLevel: "-4"
   enabled: true
   labels: {}
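The merge-key pitfall called out in the comment above cuts both ways: if you actually need pull secrets, override with a fully populated list rather than a bare `- name:` entry. A hypothetical override, assuming a registry secret named `my-registry-secret` already exists in the namespace:

```yaml
# values.override.yaml (hypothetical)
imagePullSecrets:
  - name: my-registry-secret
image:
  actionsRunnerImagePullSecrets:
    - name: my-registry-secret
```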
18 adrs/0000-TEMPLATE.md (new file)
@@ -0,0 +1,18 @@
+# Title
+
+<!-- ADR titles should typically be imperative sentences. -->
+
+**Status**: (Proposed|Accepted|Rejected|Superceded|Deprecated)
+
+## Context
+
+*What is the issue or background knowledge necessary for future readers
+to understand why this ADR was written?*
+
+## Decision
+
+**What** is the change being proposed? / **How** will it be implemented?*
+
+## Consequences
+
+*What becomes easier or more difficult to do because of this change?*
@@ -60,6 +60,9 @@ type HorizontalRunnerAutoscalerSpec struct {
 	// The earlier a scheduled override is, the higher it is prioritized.
 	// +optional
 	ScheduledOverrides []ScheduledOverride `json:"scheduledOverrides,omitempty"`
+
+	// +optional
+	GitHubAPICredentialsFrom *GitHubAPICredentialsFrom `json:"githubAPICredentialsFrom,omitempty"`
 }
 
 type ScaleUpTrigger struct {
@@ -130,7 +133,7 @@ type ScaleTargetRef struct {
 
 type MetricSpec struct {
 	// Type is the type of metric to be used for autoscaling.
-	// The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
+	// It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
 	Type string `json:"type,omitempty"`
 
 	// RepositoryNames is the list of repository names to be used for calculating the metric.
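With the doc comment now admitting `PercentageRunnersBusy`, an HRA using that metric might look like the sketch below. Names and thresholds are illustrative; `scaleUpThreshold` is the field visible in the CRD hunk further down, while `scaleDownThreshold` is assumed to mirror it:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-hra              # hypothetical
spec:
  scaleTargetRef:
    name: example-runnerdeploy   # hypothetical RunnerDeployment
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: PercentageRunnersBusy
    scaleUpThreshold: "0.75"     # scale up once more than 75% of runners are busy
    scaleDownThreshold: "0.25"   # assumed counterpart field
```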
@@ -170,7 +173,7 @@ type MetricSpec struct {
 }
 
 // ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
-// A schedule can optionally be recurring, so that the correspoding override happens every day, week, month, or year.
+// A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
 type ScheduledOverride struct {
 	// StartTime is the time at which the first override starts.
 	StartTime metav1.Time `json:"startTime"`
@@ -76,6 +76,16 @@ type RunnerConfig struct {
 
 	// +optional
 	ContainerMode string `json:"containerMode,omitempty"`
+
+	GitHubAPICredentialsFrom *GitHubAPICredentialsFrom `json:"githubAPICredentialsFrom,omitempty"`
+}
+
+type GitHubAPICredentialsFrom struct {
+	SecretRef SecretReference `json:"secretRef,omitempty"`
+}
+
+type SecretReference struct {
+	Name string `json:"name"`
 }
 
 // RunnerPodSpec defines the desired pod spec fields of the runner pod
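Wired together, the new field lets a resource point its GitHub API calls at credentials held in a named secret. A hedged sketch of how this could look on a RunnerDeployment (resource and secret names are made up; the shape is the `githubAPICredentialsFrom.secretRef.name` schema added to the CRDs below):

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runnerdeploy        # hypothetical
spec:
  template:
    spec:
      repository: example/repo      # hypothetical
      githubAPICredentialsFrom:
        secretRef:
          name: github-app-secret   # hypothetical secret holding GitHub credentials
```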
@@ -160,6 +170,9 @@ type RunnerPodSpec struct {
 	// +optional
 	RuntimeClassName *string `json:"runtimeClassName,omitempty"`
+
+	// +optional
+	DnsPolicy corev1.DNSPolicy `json:"dnsPolicy,omitempty"`
 
 	// +optional
 	DnsConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"`
 
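On the YAML side this surfaces as `dnsPolicy`/`dnsConfig` on the runner pod spec, mirroring the standard Kubernetes pod fields. A sketch with illustrative values:

```yaml
spec:
  template:
    spec:
      dnsPolicy: "None"            # standard Kubernetes DNSPolicy value
      dnsConfig:
        nameservers:
        - 10.0.0.10                # hypothetical in-cluster resolver
        searches:
        - example.internal         # hypothetical search domain
```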
@@ -183,11 +196,6 @@ func (rs *RunnerSpec) Validate(rootPath *field.Path) field.ErrorList {
 		errList = append(errList, field.Invalid(rootPath.Child("workVolumeClaimTemplate"), rs.WorkVolumeClaimTemplate, err.Error()))
 	}
 
-	err = rs.validateIsServiceAccountNameSet()
-	if err != nil {
-		errList = append(errList, field.Invalid(rootPath.Child("serviceAccountName"), rs.ServiceAccountName, err.Error()))
-	}
-
 	return errList
 }
 
@@ -226,17 +234,6 @@ func (rs *RunnerSpec) validateWorkVolumeClaimTemplate() error {
 	return rs.WorkVolumeClaimTemplate.validate()
 }
 
-func (rs *RunnerSpec) validateIsServiceAccountNameSet() error {
-	if rs.ContainerMode != "kubernetes" {
-		return nil
-	}
-
-	if rs.ServiceAccountName == "" {
-		return errors.New("service account name is required if container mode is kubernetes")
-	}
-	return nil
-}
-
 // RunnerStatus defines the observed state of Runner
 type RunnerStatus struct {
 	// Turns true only if the runner pod is ready.
@@ -315,8 +312,10 @@ func (w *WorkVolumeClaimTemplate) V1VolumeMount(mountPath string) corev1.VolumeM
 // +kubebuilder:printcolumn:JSONPath=".spec.enterprise",name=Enterprise,type=string
 // +kubebuilder:printcolumn:JSONPath=".spec.organization",name=Organization,type=string
 // +kubebuilder:printcolumn:JSONPath=".spec.repository",name=Repository,type=string
+// +kubebuilder:printcolumn:JSONPath=".spec.group",name=Group,type=string
 // +kubebuilder:printcolumn:JSONPath=".spec.labels",name=Labels,type=string
 // +kubebuilder:printcolumn:JSONPath=".status.phase",name=Status,type=string
+// +kubebuilder:printcolumn:JSONPath=".status.message",name=Message,type=string
 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
 
 // Runner is the Schema for the runners API
@@ -338,11 +337,7 @@ func (r Runner) IsRegisterable() bool {
 }
 
 	now := metav1.Now()
-	if r.Status.Registration.ExpiresAt.Before(&now) {
-		return false
-	}
-
-	return true
+	return !r.Status.Registration.ExpiresAt.Before(&now)
 }
 
 // +kubebuilder:object:root=true
@@ -33,7 +33,7 @@ type RunnerDeploymentSpec struct {
 
 	// EffectiveTime is the time the upstream controller requested to sync Replicas.
 	// It is usually populated by the webhook-based autoscaler via HRA.
-	// The value is inherited to RunnerRepicaSet(s) and used to prevent ephemeral runners from unnecessarily recreated.
+	// The value is inherited to RunnerReplicaSet(s) and used to prevent ephemeral runners from unnecessarily recreated.
 	//
 	// +optional
 	// +nullable
@@ -90,6 +90,22 @@ func (in *CheckRunSpec) DeepCopy() *CheckRunSpec {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitHubAPICredentialsFrom) DeepCopyInto(out *GitHubAPICredentialsFrom) {
+	*out = *in
+	out.SecretRef = in.SecretRef
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubAPICredentialsFrom.
+func (in *GitHubAPICredentialsFrom) DeepCopy() *GitHubAPICredentialsFrom {
+	if in == nil {
+		return nil
+	}
+	out := new(GitHubAPICredentialsFrom)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *GitHubEventScaleUpTriggerSpec) DeepCopyInto(out *GitHubEventScaleUpTriggerSpec) {
 	*out = *in
@@ -231,6 +247,11 @@ func (in *HorizontalRunnerAutoscalerSpec) DeepCopyInto(out *HorizontalRunnerAuto
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	if in.GitHubAPICredentialsFrom != nil {
+		in, out := &in.GitHubAPICredentialsFrom, &out.GitHubAPICredentialsFrom
+		*out = new(GitHubAPICredentialsFrom)
+		**out = **in
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerSpec.
@@ -425,6 +446,11 @@ func (in *RunnerConfig) DeepCopyInto(out *RunnerConfig) {
 		*out = new(string)
 		**out = **in
 	}
+	if in.GitHubAPICredentialsFrom != nil {
+		in, out := &in.GitHubAPICredentialsFrom, &out.GitHubAPICredentialsFrom
+		*out = new(GitHubAPICredentialsFrom)
+		**out = **in
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerConfig.
@@ -1136,6 +1162,21 @@ func (in *ScheduledOverride) DeepCopy() *ScheduledOverride {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretReference) DeepCopyInto(out *SecretReference) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
+func (in *SecretReference) DeepCopy() *SecretReference {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *WorkVolumeClaimTemplate) DeepCopyInto(out *WorkVolumeClaimTemplate) {
 	*out = *in
4 build/version.go (new file)
@@ -0,0 +1,4 @@
+package build
+
+// This is overridden at build-time using go-build ldflags. dev is the fallback value
+var Version = "NA"
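(The comment implies the value is injected via something like `go build -ldflags "-X <module>/build.Version=<version>"`; the exact invocation is not shown in this hunk.)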
@@ -15,10 +15,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.20.1
+version: 0.21.1
 
 # Used as the default manager tag value when no tag property is provided in the values.yaml
-appVersion: 0.25.1
+appVersion: 0.26.0
 
 home: https://github.com/actions-runner-controller/actions-runner-controller
 
@@ -8,104 +8,105 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
 
 > _Default values are the defaults set in the charts `values.yaml`, some properties have default configurations in the code for when the property is omitted or invalid_
 
 | Key | Description | Default |
 |-----|-------------|---------|
 | `labels` | Set labels to apply to all resources in the chart | |
 | `replicaCount` | Set the number of controller pods | 1 |
 | `webhookPort` | Set the containerPort for the webhook Pod | 9443 |
-| `syncPeriod` | Set the period in which the controler reconciles the desired runners count | 10m |
+| `syncPeriod` | Set the period in which the controller reconciles the desired runners count | 1m |
 | `enableLeaderElection` | Enable election configuration | true |
 | `leaderElectionId` | Set the election ID for the controller group | |
 | `githubEnterpriseServerURL` | Set the URL for a self-hosted GitHub Enterprise Server | |
 | `githubURL` | Override GitHub URL to be used for GitHub API calls | |
 | `githubUploadURL` | Override GitHub Upload URL to be used for GitHub API calls | |
 | `runnerGithubURL` | Override GitHub URL to be used by runners during registration | |
 | `logLevel` | Set the log level of the controller container | |
 | `additionalVolumes` | Set additional volumes to add to the manager container | |
 | `additionalVolumeMounts` | Set additional volume mounts to add to the manager container | |
 | `authSecret.create` | Deploy the controller auth secret | false |
 | `authSecret.name` | Set the name of the auth secret | controller-manager |
 | `authSecret.annotations` | Set annotations for the auth Secret | |
 | `authSecret.github_app_id` | The ID of your GitHub App. **This can't be set at the same time as `authSecret.github_token`** | |
 | `authSecret.github_app_installation_id` | The ID of your GitHub App installation. **This can't be set at the same time as `authSecret.github_token`** | |
 | `authSecret.github_app_private_key` | The multiline string of your GitHub App's private key. **This can't be set at the same time as `authSecret.github_token`** | |
 | `authSecret.github_token` | Your chosen GitHub PAT token. **This can't be set at the same time as the `authSecret.github_app_*`** | |
 | `authSecret.github_basicauth_username` | Username for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | |
 | `authSecret.github_basicauth_password` | Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | |
 | `dockerRegistryMirror` | The default Docker Registry Mirror used by runners. | |
 | `hostNetwork` | The "hostNetwork" of the controller container | false |
 | `image.repository` | The "repository/image" of the controller container | summerwind/actions-runner-controller |
 | `image.tag` | The tag of the controller container | |
 | `image.actionsRunnerRepositoryAndTag` | The "repository/image" of the actions runner container | summerwind/actions-runner:latest |
 | `image.actionsRunnerImagePullSecrets` | Optional image pull secrets to be included in the runner pod's ImagePullSecrets | |
 | `image.dindSidecarRepositoryAndTag` | The "repository/image" of the dind sidecar container | docker:dind |
 | `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
 | `metrics.serviceMonitor` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false |
 | `metrics.serviceAnnotations` | Set annotations for the provisioned metrics service resource | |
 | `metrics.port` | Set port of metrics service | 8443 |
 | `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
 | `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
 | `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
 | `metrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
 | `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | |
 | `fullnameOverride` | Override the full resource names | |
 | `nameOverride` | Override the resource name prefix | |
 | `serviceAccount.annotations` | Set annotations to the service account | |
 | `serviceAccount.create` | Deploy the controller pod under a service account | true |
 | `podAnnotations` | Set annotations for the controller pod | |
 | `podLabels` | Set labels for the controller pod | |
 | `serviceAccount.name` | Set the name of the service account | |
 | `securityContext` | Set the security context for each container in the controller pod | |
 | `podSecurityContext` | Set the security context to controller pod | |
 | `service.annotations` | Set annotations for the provisioned webhook service resource | |
 | `service.port` | Set controller service ports | |
 | `service.type` | Set controller service type | |
 | `topologySpreadConstraints` | Set the controller pod topologySpreadConstraints | |
 | `nodeSelector` | Set the controller pod nodeSelector | |
 | `resources` | Set the controller pod resources | |
 | `affinity` | Set the controller pod affinity rules | |
 | `podDisruptionBudget.enabled` | Enables a PDB to ensure HA of controller pods | false |
 | `podDisruptionBudget.minAvailable` | Minimum number of pods that must be available after eviction | |
 | `podDisruptionBudget.maxUnavailable` | Maximum number of pods that can be unavailable after eviction. Kubernetes 1.7+ required. | |
 | `tolerations` | Set the controller pod tolerations | |
 | `env` | Set environment variables for the controller container | |
 | `priorityClassName` | Set the controller pod priorityClassName | |
 | `scope.watchNamespace` | Tells the controller and the github webhook server which namespace to watch if `scope.singleNamespace` is true | `Release.Namespace` (the default namespace of the helm chart). |
 | `scope.singleNamespace` | Limit the controller to watch a single namespace | false |
 | `certManagerEnabled` | Enable cert-manager. If disabled you must set admissionWebHooks.caBundle and create TLS secrets manually | true |
+| `runner.statusUpdateHook.enabled` | Use custom RBAC for runners (role, role binding and service account), this will enable reporting runner statuses | false |
 | `admissionWebHooks.caBundle` | Base64-encoded PEM bundle containing the CA that signed the webhook's serving certificate | |
 | `githubWebhookServer.logLevel` | Set the log level of the githubWebhookServer container | |
 | `githubWebhookServer.replicaCount` | Set the number of webhook server pods | 1 |
 | `githubWebhookServer.useRunnerGroupsVisibility` | Enable supporting runner groups with custom visibility. This will incur in extra API calls and may blow up your budget. Currently, you also need to set `githubWebhookServer.secret.enabled` to enable this feature. | false |
-| `githubWebhookServer.syncPeriod` | Set the period in which the controller reconciles the resources | 10m |
 | `githubWebhookServer.enabled` | Deploy the webhook server pod | false |
+| `githubWebhookServer.queueLimit` | Set the queue size limit in the githubWebhookServer | |
 | `githubWebhookServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false |
 | `githubWebhookServer.secret.create` | Deploy the webhook hook secret | false |
 | `githubWebhookServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
 | `githubWebhookServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
 | `githubWebhookServer.imagePullSecrets` | Specifies the secret to be used when pulling the githubWebhookServer pod containers | |
 | `githubWebhookServer.nameOverride` | Override the resource name prefix | |
 | `githubWebhookServer.fullnameOverride` | Override the full resource names | |
 | `githubWebhookServer.serviceAccount.create` | Deploy the githubWebhookServer under a service account | true |
 | `githubWebhookServer.serviceAccount.annotations` | Set annotations for the service account | |
 | `githubWebhookServer.serviceAccount.name` | Set the service account name | |
 | `githubWebhookServer.podAnnotations` | Set annotations for the githubWebhookServer pod | |
 | `githubWebhookServer.podLabels` | Set labels for the githubWebhookServer pod | |
 | `githubWebhookServer.podSecurityContext` | Set the security context to githubWebhookServer pod | |
 | `githubWebhookServer.securityContext` | Set the security context for each container in the githubWebhookServer pod | |
 | `githubWebhookServer.resources` | Set the githubWebhookServer pod resources | |
 | `githubWebhookServer.topologySpreadConstraints` | Set the githubWebhookServer pod topologySpreadConstraints | |
 | `githubWebhookServer.nodeSelector` | Set the githubWebhookServer pod nodeSelector | |
 | `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
 | `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
 | `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
 | `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
 | `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
 | `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
 | `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
 | `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
 | `githubWebhookServer.ingress.tls` | Set tls configuration for ingress | |
 | `githubWebhookServer.ingress.ingressClassName` | Set ingress class name | |
 | `githubWebhookServer.podDisruptionBudget.enabled` | Enables a PDB to ensure HA of githubwebhook pods | false |
 | `githubWebhookServer.podDisruptionBudget.minAvailable` | Minimum number of pods that must be available after eviction | |
 | `githubWebhookServer.podDisruptionBudget.maxUnavailable` | Maximum number of pods that can be unavailable after eviction. Kubernetes 1.7+ required. | |
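Taken together with the values changes above, enabling the newly documented options might look like this sketch (the queue limit value is illustrative):

```yaml
runner:
  statusUpdateHook:
    enabled: true       # custom RBAC for runners; enables status reporting
githubWebhookServer:
  enabled: true
  queueLimit: 100       # illustrative; caps the webhook server's queue size
```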
@@ -61,6 +61,16 @@ spec:
               type: integer
             type: object
           type: array
+        githubAPICredentialsFrom:
+          properties:
+            secretRef:
+              properties:
+                name:
+                  type: string
+              required:
+              - name
+              type: object
+          type: object
         maxReplicas:
           description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
           type: integer
@@ -92,7 +102,7 @@ spec:
           description: ScaleUpThreshold is the percentage of busy runners greater than which will trigger the hpa to scale runners up.
           type: string
         type:
-          description: Type is the type of metric to be used for autoscaling. The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
+          description: Type is the type of metric to be used for autoscaling. It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
           type: string
       type: object
     type: array
@@ -170,7 +180,7 @@ spec:
     scheduledOverrides:
       description: ScheduledOverrides is the list of ScheduledOverride. It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. The earlier a scheduled override is, the higher it is prioritized.
       items:
-        description: ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. A schedule can optionally be recurring, so that the correspoding override happens every day, week, month, or year.
+        description: ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
         properties:
           endTime:
             description: EndTime is the time at which the first override ends.
@@ -49,7 +49,7 @@ spec:
       description: RunnerDeploymentSpec defines the desired state of RunnerDeployment
       properties:
         effectiveTime:
-          description: EffectiveTime is the time the upstream controller requested to sync Replicas. It is usually populated by the webhook-based autoscaler via HRA. The value is inherited to RunnerRepicaSet(s) and used to prevent ephemeral runners from unnecessarily recreated.
+          description: EffectiveTime is the time the upstream controller requested to sync Replicas. It is usually populated by the webhook-based autoscaler via HRA. The value is inherited to RunnerReplicaSet(s) and used to prevent ephemeral runners from unnecessarily recreated.
           format: date-time
           nullable: true
           type: string
@@ -946,7 +946,7 @@ spec:
           description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
           type: string
         ports:
-          description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+          description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
           items:
             description: ContainerPort represents a network port in a single container.
             properties:
@@ -1381,6 +1381,9 @@ spec:
               type: string
             type: array
           type: object
+        dnsPolicy:
+          description: DNSPolicy defines how a pod's DNS will be configured.
+          type: string
         dockerEnabled:
           type: boolean
         dockerEnv:
@@ -1635,7 +1638,7 @@ spec:
           type: boolean
         ephemeralContainers:
           items:
-            description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. \n This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate."
+            description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted."
             properties:
               args:
                 description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
@@ -2415,6 +2418,16 @@ spec:
             - name
             type: object
           type: array
+        githubAPICredentialsFrom:
+          properties:
+            secretRef:
+              properties:
+                name:
+                  type: string
+              required:
+              - name
+              type: object
+          type: object
         group:
           type: string
         hostAliases:
@@ -2815,7 +2828,7 @@ spec:
           description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
           type: string
         ports:
-          description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+          description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
           items:
             description: ContainerPort represents a network port in a single container.
             properties:
@@ -3725,7 +3738,7 @@ spec:
           description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
           type: string
         ports:
-          description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+          description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
           items:
             description: ContainerPort represents a network port in a single container.
             properties:
@@ -4193,16 +4206,28 @@ spec:
                   description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
                   type: object
               type: object
+            matchLabelKeys:
+              description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.
+              items:
+                type: string
+              type: array
+              x-kubernetes-list-type: atomic
             maxSkew:
               description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
               format: int32
               type: integer
             minDomains:
-              description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate."
+              description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)."
               format: int32
               type: integer
+            nodeAffinityPolicy:
+              description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
+              type: string
+            nodeTaintsPolicy:
+              description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
+              type: string
             topologyKey:
-              description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
+              description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
               type: string
             whenUnsatisfiable:
               description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
|
||||||
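Read together, the schema above describes a standard Kubernetes pod topology spread constraint. A minimal sketch of how the fields combine in a pod template, with illustrative values and a hypothetical app: example-runner label (nothing here is a default of this project):

    topologySpreadConstraints:
    - maxSkew: 1                        # max allowed gap between the fullest and emptiest eligible zone
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule  # hard constraint; ScheduleAnyway makes it best-effort
      minDomains: 3                     # beta; needs the MinDomainsInPodTopologySpread feature gate
      nodeAffinityPolicy: Honor         # count only nodes matching the pod's nodeAffinity/nodeSelector
      nodeTaintsPolicy: Ignore          # count tainted nodes too
      labelSelector:
        matchLabels:
          app: example-runner           # hypothetical label; match your own pod labels

Note that, per the descriptions above, minDomains is only valid together with whenUnsatisfiable: DoNotSchedule.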
@@ -943,7 +943,7 @@ spec:
       description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
       type: string
     ports:
-      description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+      description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
       items:
         description: ContainerPort represents a network port in a single container.
         properties:
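For reference, a minimal ports entry that fits the ContainerPort schema referenced above (container name, image, and port values are illustrative):

    containers:
    - name: runner                   # illustrative container name
      image: example/image:latest    # placeholder image
      ports:
      - name: metrics                # optional; a DNS_LABEL unique within the pod
        containerPort: 8080          # the port on the pod's IP address
        protocol: TCP                # defaults to TCP

As both versions of the description note, omitting a port here does not prevent it from being exposed; the new text additionally warns that modifying this array with a strategic merge patch can corrupt it.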
@@ -1378,6 +1378,9 @@ spec:
           type: string
         type: array
       type: object
+    dnsPolicy:
+      description: DNSPolicy defines how a pod's DNS will be configured.
+      type: string
     dockerEnabled:
       type: boolean
     dockerEnv:
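The added dnsPolicy field sits alongside dockerEnabled in the runner spec and passes through to the runner pod. A sketch, assuming ARC's classic actions.summerwind.dev/v1alpha1 API group and the standard Kubernetes DNS policy values:

    apiVersion: actions.summerwind.dev/v1alpha1   # assumption: ARC's classic API group
    kind: RunnerDeployment
    spec:
      template:
        spec:
          dnsPolicy: Default        # or ClusterFirst, ClusterFirstWithHostNet, None
          dockerEnabled: true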
@@ -1632,7 +1635,7 @@ spec:
       type: boolean
     ephemeralContainers:
       items:
-        description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. \n This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate."
+        description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted."
         properties:
           args:
             description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
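The dropped feature-gate sentence reflects ephemeral containers going GA in Kubernetes 1.25; the mechanics are unchanged: entries can only be added through the pod's ephemeralcontainers subresource, typically via kubectl debug. A sketch of such an entry (names and image are illustrative):

    ephemeralContainers:
    - name: debugger                # illustrative name
      image: busybox:1.36           # any debugging image
      command: ["sh"]
      stdin: true
      tty: true
      targetContainerName: runner   # optional: join this container's namespaces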
@@ -2412,6 +2415,16 @@ spec:
         - name
         type: object
       type: array
+    githubAPICredentialsFrom:
+      properties:
+        secretRef:
+          properties:
+            name:
+              type: string
+          required:
+          - name
+          type: object
+      type: object
     group:
       type: string
     hostAliases:
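Based purely on the schema above, githubAPICredentialsFrom references a Secret by name. A sketch of its use on a runner resource (the Secret name is a placeholder, and the keys the Secret must carry are not visible in this diff):

    spec:
      githubAPICredentialsFrom:
        secretRef:
          name: github-app-credentials   # placeholder; a Secret in the same namespace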
@@ -2812,7 +2825,7 @@ spec:
       description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
       type: string
     ports:
-      description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+      description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
       items:
         description: ContainerPort represents a network port in a single container.
         properties:
@@ -3722,7 +3735,7 @@ spec:
       description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
       type: string
     ports:
-      description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+      description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
       items:
         description: ContainerPort represents a network port in a single container.
         properties:
@@ -4190,16 +4203,28 @@ spec:
           description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
           type: object
       type: object
+    matchLabelKeys:
+      description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.
+      items:
+        type: string
+      type: array
+      x-kubernetes-list-type: atomic
     maxSkew:
       description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
       format: int32
       type: integer
     minDomains:
-      description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate."
+      description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)."
       format: int32
       type: integer
+    nodeAffinityPolicy:
+      description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
+      type: string
+    nodeTaintsPolicy:
+      description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
+      type: string
     topologyKey:
-      description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
+      description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
       type: string
     whenUnsatisfiable:
       description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
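The new matchLabelKeys field lets the spread calculation pick up label values from the incoming pod itself. A common sketch is spreading per rollout revision via pod-template-hash, assuming a Deployment-style controller sets that label (the app label is hypothetical):

    topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app: example-runner     # hypothetical label
      matchLabelKeys:
      - pod-template-hash         # value is read from the incoming pod's labels and ANDed with labelSelector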
@@ -24,12 +24,18 @@ spec:
   - jsonPath: .spec.repository
     name: Repository
     type: string
+  - jsonPath: .spec.group
+    name: Group
+    type: string
   - jsonPath: .spec.labels
     name: Labels
     type: string
   - jsonPath: .status.phase
     name: Status
     type: string
+  - jsonPath: .status.message
+    name: Message
+    type: string
   - jsonPath: .metadata.creationTimestamp
     name: Age
     type: date
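These additionalPrinterColumns entries surface .spec.group and .status.message directly in plain kubectl listings; hypothetically it would look something like the following (the resource name, values, and output are illustrative, not captured output):

    $ kubectl get runners
    NAME             REPOSITORY    GROUP     LABELS          STATUS    MESSAGE   AGE
    example-runner   org/repo      Default   self-hosted     Running             5m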
@@ -884,7 +890,7 @@ spec:
       description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
       type: string
     ports:
-      description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+      description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
       items:
         description: ContainerPort represents a network port in a single container.
         properties:
@@ -1319,6 +1325,9 @@ spec:
           type: string
         type: array
       type: object
+    dnsPolicy:
+      description: DNSPolicy defines how a pod's DNS will be configured.
+      type: string
     dockerEnabled:
       type: boolean
     dockerEnv:
@@ -1573,7 +1582,7 @@ spec:
       type: boolean
     ephemeralContainers:
       items:
-        description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. \n This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate."
+        description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted."
         properties:
           args:
             description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
@@ -2353,6 +2362,16 @@ spec:
         - name
         type: object
       type: array
+    githubAPICredentialsFrom:
+      properties:
+        secretRef:
+          properties:
+            name:
+              type: string
+          required:
+          - name
+          type: object
+      type: object
     group:
       type: string
     hostAliases:
@@ -2753,7 +2772,7 @@ spec:
       description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
       type: string
     ports:
-      description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+      description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
       items:
         description: ContainerPort represents a network port in a single container.
         properties:
@@ -3663,7 +3682,7 @@ spec:
       description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
       type: string
     ports:
-      description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+      description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
       items:
         description: ContainerPort represents a network port in a single container.
         properties:
@@ -4131,16 +4150,28 @@ spec:
           description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
           type: object
       type: object
+    matchLabelKeys:
+      description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.
+      items:
+        type: string
+      type: array
+      x-kubernetes-list-type: atomic
     maxSkew:
       description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
       format: int32
       type: integer
     minDomains:
-      description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate."
+      description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)."
       format: int32
       type: integer
+    nodeAffinityPolicy:
+      description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
+      type: string
+    nodeTaintsPolicy:
+      description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
+      type: string
     topologyKey:
-      description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
+      description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
       type: string
     whenUnsatisfiable:
       description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
@@ -67,6 +67,16 @@ spec:
       type: string
     ephemeral:
       type: boolean
+    githubAPICredentialsFrom:
+      properties:
+        secretRef:
+          properties:
+            name:
+              type: string
+          required:
+          - name
+          type: object
+      type: object
     group:
       type: string
     image:
@@ -76,7 +86,7 @@ spec:
         type: string
       type: array
     minReadySeconds:
-      description: Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
+      description: Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)
       format: int32
       type: integer
     organization:
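minReadySeconds itself is unchanged; only the stale feature-gate sentence was dropped (the StatefulSetMinReadySeconds gate has since graduated). A sketch of the field on the resource this CRD appears to describe, a RunnerSet-style object embedding StatefulSet fields (an assumption based on the surrounding schema; values are illustrative):

    kind: RunnerSet        # assumption: the ARC CRD with StatefulSet-like fields
    spec:
      replicas: 3
      minReadySeconds: 30  # a new pod must stay Ready for 30s before it counts as available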
@@ -1006,7 +1016,7 @@ spec:
       description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
       type: string
     ports:
-      description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+      description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
       items:
         description: ContainerPort represents a network port in a single container.
         properties:
@@ -1448,9 +1458,9 @@ spec:
       description: 'EnableServiceLinks indicates whether information about services should be injected into pod''s environment variables, matching the syntax of Docker links. Optional: Defaults to true.'
       type: boolean
     ephemeralContainers:
-      description: List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.
+      description: List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
       items:
-        description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. \n This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate."
+        description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted."
         properties:
           args:
             description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
@@ -2254,6 +2264,9 @@ spec:
     hostPID:
       description: 'Use the host''s pid namespace. Optional: Default to false.'
       type: boolean
+    hostUsers:
+      description: 'Use the host''s user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.'
+      type: boolean
     hostname:
       description: Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.
       type: string
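hostUsers is the new user-namespace switch: true (or unset) keeps the existing behavior, while false asks the kubelet for a fresh user namespace, so root inside the pod is unprivileged on the host. Per the description it is alpha and needs the UserNamespacesSupport gate. A sketch (names and image are illustrative):

    spec:
      hostUsers: false               # run the pod in its own user namespace
      containers:
      - name: runner                 # illustrative name
        image: example/image:latest  # placeholder image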
@@ -2638,7 +2651,7 @@ spec:
       description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
       type: string
     ports:
-      description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+      description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
       items:
         description: ContainerPort represents a network port in a single container.
         properties:
@@ -3057,7 +3070,7 @@ spec:
       type: object
       x-kubernetes-map-type: atomic
     os:
-      description: "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. \n If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions \n If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup This is a beta field and requires the IdentifyPodOS feature"
+      description: "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. \n If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions \n If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup"
       properties:
         name:
           description: 'Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null'
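The os field declares the pod's operating system up front so incompatible fields can be rejected; the updated text simply adds spec.hostUsers to the list that must stay unset on Windows. A sketch:

    spec:
      os:
        name: linux   # currently linux or windows; clients should treat unknown values as os: null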
@@ -3270,16 +3283,28 @@ spec:
         description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
         type: object
       type: object
+    matchLabelKeys:
+      description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.
+      items:
+        type: string
+      type: array
+      x-kubernetes-list-type: atomic
     maxSkew:
       description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
       format: int32
       type: integer
     minDomains:
-      description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate."
+      description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)."
       format: int32
       type: integer
+    nodeAffinityPolicy:
+      description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
+      type: string
+    nodeTaintsPolicy:
+      description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
+      type: string
     topologyKey:
-      description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
+      description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
       type: string
     whenUnsatisfiable:
       description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
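The three additions above surface newer upstream pod-topology-spread options on the runner pod spec: matchLabelKeys narrows the spread calculation by label key, while nodeAffinityPolicy and nodeTaintsPolicy control which nodes count as eligible domains. A minimal sketch of how the fields combine in a pod template, with the standard zone topology key; the app label value is hypothetical:

    topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: topology.kubernetes.io/zone
        whenUnsatisfiable: ScheduleAnyway
        labelSelector:
          matchLabels:
            app: example-runner          # hypothetical label
        matchLabelKeys:
          - pod-template-hash            # spread per template revision
        nodeAffinityPolicy: Honor        # only nodes matching nodeAffinity/nodeSelector are counted
        nodeTaintsPolicy: Ignore         # taints do not shrink the candidate set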
@@ -114,4 +114,4 @@ Create the name of the service account to use
 
 {{- define "actions-runner-controller.pdbName" -}}
 {{- include "actions-runner-controller.fullname" . | trunc 59 }}-pdb
 {{- end }}

@@ -8,6 +8,7 @@ metadata:
     {{- toYaml . | nindent 4 }}
   {{- end }}
   name: {{ include "actions-runner-controller.serviceMonitorName" . }}
+  namespace: {{ .Release.Namespace }}
spec:
  endpoints:
  - path: /metrics

@@ -1,5 +1,5 @@
 {{- if .Values.podDisruptionBudget.enabled }}
-apiVersion: policy/v1beta1
+apiVersion: policy/v1
 kind: PodDisruptionBudget
 metadata:
   labels:
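With the chart now emitting policy/v1, enabling the controller's PodDisruptionBudget remains a values-level switch. A minimal sketch, reusing the minAvailable knob that the values.yaml comments later in this diff suggest:

    podDisruptionBudget:
      enabled: true
      minAvailable: 1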
@@ -58,15 +58,15 @@ spec:
        {{- if .Values.scope.singleNamespace }}
        - "--watch-namespace={{ default .Release.Namespace .Values.scope.watchNamespace }}"
        {{- end }}
-       {{- if .Values.githubAPICacheDuration }}
-       - "--github-api-cache-duration={{ .Values.githubAPICacheDuration }}"
-       {{- end }}
        {{- if .Values.logLevel }}
        - "--log-level={{ .Values.logLevel }}"
        {{- end }}
        {{- if .Values.runnerGithubURL }}
        - "--runner-github-url={{ .Values.runnerGithubURL }}"
        {{- end }}
+       {{- if .Values.runner.statusUpdateHook.enabled }}
+       - "--runner-status-update-hook"
+       {{- end }}
        command:
        - "/manager"
        env:
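Alongside removing the deprecated --github-api-cache-duration flag, the manager gains an opt-in --runner-status-update-hook flag driven by a new values key (defined further down in this diff's values.yaml). A minimal sketch of enabling it:

    runner:
      statusUpdateHook:
        enabled: true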
@@ -118,10 +118,14 @@ spec:
              name: {{ include "actions-runner-controller.secretName" . }}
              optional: true
        {{- end }}
+       {{- if kindIs "slice" .Values.env }}
+       {{- toYaml .Values.env | nindent 8 }}
+       {{- else }}
        {{- range $key, $val := .Values.env }}
        - name: {{ $key }}
          value: {{ $val | quote }}
        {{- end }}
+       {{- end }}
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
        name: manager
        imagePullPolicy: {{ .Values.image.pullPolicy }}
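The kindIs "slice" branch makes .Values.env accept either the original key/value map or a verbatim list of EnvVar definitions, which is what allows valueFrom sources. A sketch of both accepted shapes, mirroring the commented examples added to values.yaml later in this diff:

    # map form, rendered as simple name/value pairs
    env:
      http_proxy: "proxy.com:8080"

    # list form, passed through as-is, so valueFrom works
    env:
      - name: GITHUB_APP_INSTALLATION_ID
        valueFrom:
          secretKeyRef:
            name: some-secret-name
            key: some_key_in_the_secret
            optional: true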
@@ -39,7 +39,6 @@ spec:
        {{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
        {{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
        - "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
-       - "--sync-period={{ .Values.githubWebhookServer.syncPeriod }}"
        {{- if .Values.githubWebhookServer.logLevel }}
        - "--log-level={{ .Values.githubWebhookServer.logLevel }}"
        {{- end }}

@@ -49,6 +48,9 @@ spec:
        {{- if .Values.runnerGithubURL }}
        - "--runner-github-url={{ .Values.runnerGithubURL }}"
        {{- end }}
+       {{- if .Values.githubWebhookServer.queueLimit }}
+       - "--queue-limit={{ .Values.githubWebhookServer.queueLimit }}"
+       {{- end }}
        command:
        - "/github-webhook-server"
        env:
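Per the flag's description in the Go changes near the end of this diff, the queue limit caps the webhook server's scale-operation queue; once the queue is full, further matching events receive an HTTP 500. A minimal sketch of bounding it, using the value the new values.yaml comment suggests:

    githubWebhookServer:
      enabled: true
      queueLimit: 100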
@@ -1,13 +1,7 @@
 {{- if .Values.githubWebhookServer.ingress.enabled -}}
 {{- $fullName := include "actions-runner-controller-github-webhook-server.fullname" . -}}
 {{- $svcPort := (index .Values.githubWebhookServer.service.ports 0).port -}}
-{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }}
 apiVersion: networking.k8s.io/v1
-{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" }}
-apiVersion: networking.k8s.io/v1beta1
-{{- else if .Capabilities.APIVersions.Has "extensions/v1beta1/Ingress" }}
-apiVersion: extensions/v1beta1
-{{- end }}
 kind: Ingress
 metadata:
   name: {{ $fullName }}

@@ -42,19 +36,12 @@ spec:
          {{- end }}
          {{- range .paths }}
          - path: {{ .path }}
-           {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }}
            pathType: {{ .pathType }}
-           {{- end }}
            backend:
-             {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }}
              service:
                name: {{ $fullName }}
                port:
                  number: {{ $svcPort }}
-             {{- else }}
-             serviceName: {{ $fullName }}
-             servicePort: {{ $svcPort }}
-             {{- end }}
          {{- end }}
        {{- end }}
 {{- end }}
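With the v1beta1 and extensions fallbacks dropped, the template is pinned to networking.k8s.io/v1, whose backend takes the service name/port object form rather than serviceName/servicePort. A sketch of what a rendered v1 path block looks like; the host, service name, and port number here are hypothetical:

    rules:
      - host: webhook.example.com                    # hypothetical
        http:
          paths:
            - path: /
              pathType: Prefix
              backend:
                service:
                  name: arc-github-webhook-server    # hypothetical release name
                  port:
                    number: 80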
@@ -1,5 +1,5 @@
 {{- if .Values.githubWebhookServer.podDisruptionBudget.enabled }}
-apiVersion: policy/v1beta1
+apiVersion: policy/v1
 kind: PodDisruptionBudget
 metadata:
   labels:

@@ -8,6 +8,7 @@ metadata:
     {{- toYaml . | nindent 4 }}
   {{- end }}
   name: {{ include "actions-runner-controller-github-webhook-server.serviceMonitorName" . }}
+  namespace: {{ .Release.Namespace }}
spec:
  endpoints:
  - path: /metrics
@@ -258,3 +258,64 @@ rules:
   - get
   - list
   - watch
+{{- if .Values.runner.statusUpdateHook.enabled }}
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - create
+  - delete
+  - get
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - rolebindings
+  verbs:
+  - create
+  - delete
+  - get
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - roles
+  verbs:
+  - create
+  - delete
+  - get
+{{- end }}
+{{- if .Values.rbac.allowGrantingKubernetesContainerModePermissions }}
+{{/* These permissions are required by ARC to create RBAC resources for the runner pod to use the kubernetes container mode. */}}
+{{/* See https://github.com/actions-runner-controller/actions-runner-controller/pull/1268/files#r917331632 */}}
+- apiGroups:
+  - ""
+  resources:
+  - pods/exec
+  verbs:
+  - create
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - pods/log
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - "batch"
+  resources:
+  - jobs
+  verbs:
+  - get
+  - list
+  - create
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - create
+  - delete
+{{- end }}
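Both rule blocks are gated on explicit values because Kubernetes otherwise rejects ARC granting permissions its own manager role does not hold (privilege-escalation prevention, per the linked PR discussion). A minimal sketch of opting in, using the keys this diff adds to values.yaml:

    rbac:
      allowGrantingKubernetesContainerModePermissions: true
    runner:
      statusUpdateHook:
        enabled: true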
@@ -1,4 +1,8 @@
+{{/*
+We will use a self managed CA if one is not provided by cert-manager
+*/}}
+{{- $ca := genCA "actions-runner-ca" 3650 }}
+{{- $cert := genSignedCert (printf "%s.%s.svc" (include "actions-runner-controller.webhookServiceName" .) .Release.Namespace) nil (list (printf "%s.%s.svc" (include "actions-runner-controller.webhookServiceName" .) .Release.Namespace)) 3650 $ca }}
 ---
 apiVersion: admissionregistration.k8s.io/v1
 kind: MutatingWebhookConfiguration
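genCA and genSignedCert are Sprig functions available in Helm templates: genCA takes a common name and a validity in days; genSignedCert takes a CN, a list of IPs, a list of DNS names, days, and the signing CA. For a hypothetical release whose webhook service resolves to arc-webhook in namespace arc-system, the lines above reduce to roughly:

    {{- $ca := genCA "actions-runner-ca" 3650 }}
    {{- $cert := genSignedCert "arc-webhook.arc-system.svc" nil (list "arc-webhook.arc-system.svc") 3650 $ca }}

One design consequence: these functions run on every render, so the self-managed CA and certificate are regenerated on each helm upgrade.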
@@ -20,6 +24,8 @@ webhooks:
   clientConfig:
     {{- if .Values.admissionWebHooks.caBundle }}
     caBundle: {{ quote .Values.admissionWebHooks.caBundle }}
+    {{- else if not .Values.certManagerEnabled }}
+    caBundle: {{ $ca.Cert | b64enc | quote }}
     {{- end }}
     service:
       name: {{ include "actions-runner-controller.webhookServiceName" . }}

@@ -48,6 +54,8 @@ webhooks:
   clientConfig:
     {{- if .Values.admissionWebHooks.caBundle }}
     caBundle: {{ .Values.admissionWebHooks.caBundle }}
+    {{- else if not .Values.certManagerEnabled }}
+    caBundle: {{ $ca.Cert | b64enc | quote }}
     {{- end }}
     service:
       name: {{ include "actions-runner-controller.webhookServiceName" . }}

@@ -76,6 +84,8 @@ webhooks:
   clientConfig:
     {{- if .Values.admissionWebHooks.caBundle }}
     caBundle: {{ .Values.admissionWebHooks.caBundle }}
+    {{- else if not .Values.certManagerEnabled }}
+    caBundle: {{ $ca.Cert | b64enc | quote }}
     {{- end }}
     service:
       name: {{ include "actions-runner-controller.webhookServiceName" . }}

@@ -104,6 +114,8 @@ webhooks:
   clientConfig:
     {{- if .Values.admissionWebHooks.caBundle }}
     caBundle: {{ .Values.admissionWebHooks.caBundle }}
+    {{- else if not .Values.certManagerEnabled }}
+    caBundle: {{ $ca.Cert | b64enc | quote }}
     {{- end }}
     service:
       name: {{ include "actions-runner-controller.webhookServiceName" . }}

@@ -145,6 +157,8 @@ webhooks:
   clientConfig:
     {{- if .Values.admissionWebHooks.caBundle }}
     caBundle: {{ .Values.admissionWebHooks.caBundle }}
+    {{- else if not .Values.certManagerEnabled }}
+    caBundle: {{ $ca.Cert | b64enc | quote }}
     {{- end }}
     service:
       name: {{ include "actions-runner-controller.webhookServiceName" . }}

@@ -173,6 +187,8 @@ webhooks:
   clientConfig:
     {{- if .Values.admissionWebHooks.caBundle }}
     caBundle: {{ .Values.admissionWebHooks.caBundle }}
+    {{- else if not .Values.certManagerEnabled }}
+    caBundle: {{ $ca.Cert | b64enc | quote }}
     {{- end }}
     service:
       name: {{ include "actions-runner-controller.webhookServiceName" . }}

@@ -201,6 +217,8 @@ webhooks:
   clientConfig:
     {{- if .Values.admissionWebHooks.caBundle }}
     caBundle: {{ .Values.admissionWebHooks.caBundle }}
+    {{- else if not .Values.certManagerEnabled }}
+    caBundle: {{ $ca.Cert | b64enc | quote }}
     {{- end }}
     service:
       name: {{ include "actions-runner-controller.webhookServiceName" . }}

@@ -219,3 +237,18 @@ webhooks:
     resources:
     - runnerreplicasets
   sideEffects: None
+{{ if not (or .Values.admissionWebHooks.caBundle .Values.certManagerEnabled) }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "actions-runner-controller.servingCertName" . }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "actions-runner-controller.labels" . | nindent 4 }}
+type: kubernetes.io/tls
+data:
+  tls.crt: {{ $cert.Cert | b64enc | quote }}
+  tls.key: {{ $cert.Key | b64enc | quote }}
+  ca.crt: {{ $ca.Cert | b64enc | quote }}
+{{- end }}
@@ -15,12 +15,6 @@ enableLeaderElection: true
 # Must be unique if more than one controller installed onto the same namespace.
 #leaderElectionId: "actions-runner-controller"
-
-# DEPRECATED: This has been removed as unnecessary in #1192
-# The controller tries its best not to repeat the duplicate GitHub API call
-# within this duration.
-# Defaults to syncPeriod - 10s.
-#githubAPICacheDuration: 30s
 
 # The URL of your GitHub Enterprise server, if you're using one.
 #githubEnterpriseServerURL: https://github.example.com
 

@@ -67,6 +61,18 @@ imagePullSecrets: []
 nameOverride: ""
 fullnameOverride: ""
 
+runner:
+  statusUpdateHook:
+    enabled: false
+
+rbac:
+  {}
+  # # This allows ARC to dynamically create a ServiceAccount and a Role for each Runner pod that uses "kubernetes" container mode,
+  # # by extending ARC's manager role to have the same permissions required by the pod runs the runner agent in "kubernetes" container mode.
+  # # Without this, Kubernetes blocks ARC to create the role to prevent a priviledge escalation.
+  # # See https://github.com/actions-runner-controller/actions-runner-controller/pull/1268/files#r917327010
+  # allowGrantingKubernetesContainerModePermissions: true
+
 serviceAccount:
   # Specifies whether a service account should be created
   create: true
@@ -109,7 +115,7 @@ metrics:
   enabled: true
   image:
     repository: quay.io/brancz/kube-rbac-proxy
-    tag: v0.13.0
+    tag: v0.13.1
 
 resources:
   {}

@@ -143,10 +149,20 @@ priorityClassName: ""
 
 env:
   {}
+  # specify additional environment variables for the controller pod.
+  # It's possible to specify either key vale pairs e.g.:
   # http_proxy: "proxy.com:8080"
   # https_proxy: "proxy.com:8080"
   # no_proxy: ""
+
+  # or a list of complete environment variable definitions e.g.:
+  # - name: GITHUB_APP_INSTALLATION_ID
+  #   valueFrom:
+  #     secretKeyRef:
+  #       key: some_key_in_the_secret
+  #       name: some-secret-name
+  #       optional: true
 
 ## specify additional volumes to mount in the manager container, this can be used
 ## to specify additional storage of material or to inject files from ConfigMaps
 ## into the running container

@@ -175,7 +191,6 @@ admissionWebHooks:
 githubWebhookServer:
   enabled: false
   replicaCount: 1
-  syncPeriod: 10m
   useRunnerGroupsVisibility: false
   secret:
     enabled: false

@@ -255,3 +270,4 @@ githubWebhookServer:
   enabled: false
   # minAvailable: 1
   # maxUnavailable: 3
+  # queueLimit: 100
@@ -69,10 +69,8 @@ func main() {
 
 		watchNamespace string
 
-		enableLeaderElection bool
-		syncPeriod           time.Duration
 		logLevel             string
 		queueLimit           int
 
 		ghClient *github.Client
 	)

@@ -89,9 +87,6 @@ func main() {
 	flag.StringVar(&webhookAddr, "webhook-addr", ":8000", "The address the metric endpoint binds to.")
 	flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
 	flag.StringVar(&watchNamespace, "watch-namespace", "", "The namespace to watch for HorizontalRunnerAutoscaler's to scale on Webhook. Set to empty for letting it watch for all namespaces.")
-	flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
-		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
-	flag.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled. When you use autoscaling, set to a lower value like 10 minute, because this corresponds to the minimum time to react on demand change")
 	flag.StringVar(&logLevel, "log-level", logging.LogLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`)
 	flag.IntVar(&queueLimit, "queue-limit", controllers.DefaultQueueLimit, `The maximum length of the scale operation queue. The scale opration is enqueued per every matching webhook event, and the server returns a 500 HTTP status when the queue was already full on enqueue attempt.`)
 	flag.StringVar(&webhookSecretToken, "github-webhook-secret-token", "", "The personal access token of GitHub.")

@@ -144,10 +139,10 @@ func main() {
 		setupLog.Info("GitHub client is not initialized. Runner groups with custom visibility are not supported. If needed, please provide GitHub authentication. This will incur in extra GitHub API calls")
 	}
 
+	syncPeriod := 10 * time.Minute
 	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
 		Scheme:             scheme,
 		SyncPeriod:         &syncPeriod,
-		LeaderElection:     enableLeaderElection,
 		Namespace:          watchNamespace,
 		MetricsBindAddress: metricsAddr,
 		Port:               9443,
@@ -61,6 +61,16 @@ spec:
                 type: integer
               type: object
             type: array
+          githubAPICredentialsFrom:
+            properties:
+              secretRef:
+                properties:
+                  name:
+                    type: string
+                required:
+                - name
+                type: object
+            type: object
           maxReplicas:
             description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
             type: integer
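githubAPICredentialsFrom lets a HorizontalRunnerAutoscaler read GitHub API credentials from a named Secret instead of the controller-wide configuration. A minimal sketch; the resource and Secret names are hypothetical:

    apiVersion: actions.summerwind.dev/v1alpha1
    kind: HorizontalRunnerAutoscaler
    metadata:
      name: example-hra                  # hypothetical
    spec:
      githubAPICredentialsFrom:
        secretRef:
          name: github-app-credentials   # hypothetical Secret
      minReplicas: 1
      maxReplicas: 5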
@@ -92,7 +102,7 @@ spec:
                 description: ScaleUpThreshold is the percentage of busy runners greater than which will trigger the hpa to scale runners up.
                 type: string
               type:
-                description: Type is the type of metric to be used for autoscaling. The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
+                description: Type is the type of metric to be used for autoscaling. It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
                 type: string
             type: object
           type: array
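The corrected description acknowledges PercentageRunnersBusy as a supported metric type alongside TotalNumberOfQueuedAndInProgressWorkflowRuns. A sketch of the busy-percentage form, using the scaleUpThreshold field documented just above; the threshold values are illustrative:

    metrics:
      - type: PercentageRunnersBusy
        scaleUpThreshold: "0.75"    # scale up when more than 75% of runners are busy
        scaleDownThreshold: "0.25"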
@@ -170,7 +180,7 @@ spec:
           scheduledOverrides:
             description: ScheduledOverrides is the list of ScheduledOverride. It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. The earlier a scheduled override is, the higher it is prioritized.
             items:
-              description: ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. A schedule can optionally be recurring, so that the correspoding override happens every day, week, month, or year.
+              description: ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
               properties:
                 endTime:
                   description: EndTime is the time at which the first override ends.
@@ -49,7 +49,7 @@ spec:
       description: RunnerDeploymentSpec defines the desired state of RunnerDeployment
       properties:
         effectiveTime:
-          description: EffectiveTime is the time the upstream controller requested to sync Replicas. It is usually populated by the webhook-based autoscaler via HRA. The value is inherited to RunnerRepicaSet(s) and used to prevent ephemeral runners from unnecessarily recreated.
+          description: EffectiveTime is the time the upstream controller requested to sync Replicas. It is usually populated by the webhook-based autoscaler via HRA. The value is inherited to RunnerReplicaSet(s) and used to prevent ephemeral runners from unnecessarily recreated.
           format: date-time
           nullable: true
           type: string

@@ -946,7 +946,7 @@ spec:
       description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
       type: string
     ports:
-      description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+      description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
       items:
        description: ContainerPort represents a network port in a single container.
        properties:
@@ -1381,6 +1381,9 @@ spec:
               type: string
             type: array
           type: object
+        dnsPolicy:
+          description: DNSPolicy defines how a pod's DNS will be configured.
+          type: string
         dockerEnabled:
           type: boolean
         dockerEnv:
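Exposing dnsPolicy directly on the runner spec means it no longer requires a full pod template override. A minimal sketch on a RunnerDeployment, with hypothetical names; ClusterFirstWithHostNet is one of the standard Kubernetes policies:

    apiVersion: actions.summerwind.dev/v1alpha1
    kind: RunnerDeployment
    metadata:
      name: example-runnerdeploy   # hypothetical
    spec:
      template:
        spec:
          repository: example/repo # hypothetical
          dnsPolicy: ClusterFirstWithHostNet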
@@ -1635,7 +1638,7 @@ spec:
           type: boolean
         ephemeralContainers:
           items:
-            description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. \n This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate."
+            description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted."
             properties:
               args:
                 description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'

@@ -2415,6 +2418,16 @@ spec:
             required:
             - name
             type: object
           type: array
+        githubAPICredentialsFrom:
+          properties:
+            secretRef:
+              properties:
+                name:
+                  type: string
+              required:
+              - name
+              type: object
+          type: object
         group:
           type: string
         hostAliases:
@@ -2815,7 +2828,7 @@ spec:
       description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
       type: string
     ports:
-      description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+      description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
       items:
        description: ContainerPort represents a network port in a single container.
        properties:

@@ -3725,7 +3738,7 @@ spec:
       description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
       type: string
     ports:
-      description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+      description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
       items:
        description: ContainerPort represents a network port in a single container.
        properties:

@@ -4193,16 +4206,28 @@ spec:
         description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
         type: object
       type: object
+    matchLabelKeys:
+      description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.
+      items:
+        type: string
+      type: array
+      x-kubernetes-list-type: atomic
     maxSkew:
       description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
       format: int32
       type: integer
     minDomains:
-      description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate."
+      description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)."
       format: int32
       type: integer
+    nodeAffinityPolicy:
+      description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
+      type: string
+    nodeTaintsPolicy:
+      description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
+      type: string
     topologyKey:
-      description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
+      description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
       type: string
     whenUnsatisfiable:
       description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
@@ -943,7 +943,7 @@ spec:
|
|||||||
description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
|
description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
|
||||||
type: string
|
type: string
|
||||||
ports:
|
ports:
|
||||||
description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
|
description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
|
||||||
items:
|
items:
|
||||||
description: ContainerPort represents a network port in a single container.
|
description: ContainerPort represents a network port in a single container.
|
||||||
properties:
|
properties:
|
||||||
@@ -1378,6 +1378,9 @@ spec:
|
|||||||
type: string
|
type: string
|
||||||
type: array
|
type: array
|
||||||
type: object
|
type: object
|
||||||
|
dnsPolicy:
|
||||||
|
description: DNSPolicy defines how a pod's DNS will be configured.
|
||||||
|
type: string
|
||||||
dockerEnabled:
|
dockerEnabled:
|
||||||
type: boolean
|
type: boolean
|
||||||
dockerEnv:
|
dockerEnv:
|
||||||
@@ -1632,7 +1635,7 @@ spec:
 type: boolean
 ephemeralContainers:
 items:
-description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. \n This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate."
+description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted."
 properties:
 args:
 description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
@@ -2412,6 +2415,16 @@ spec:
 - name
 type: object
 type: array
+githubAPICredentialsFrom:
+properties:
+secretRef:
+properties:
+name:
+type: string
+required:
+- name
+type: object
+type: object
 group:
 type: string
 hostAliases:
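Per the schema added above, `githubAPICredentialsFrom` carries a single required `secretRef.name`. A minimal sketch of how it might be used; the resource kind, repository, and Secret name are illustrative assumptions, since this hunk does not show which resource the schema belongs to:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment            # assumed kind for illustration
metadata:
  name: example-runnerdeploy
spec:
  template:
    spec:
      repository: example-org/example-repo    # hypothetical repository
      githubAPICredentialsFrom:
        secretRef:
          name: github-api-credentials        # hypothetical Secret with GitHub API credentials
```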
@@ -2812,7 +2825,7 @@ spec:
 description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
 type: string
 ports:
-description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
 items:
 description: ContainerPort represents a network port in a single container.
 properties:
@@ -3722,7 +3735,7 @@ spec:
 description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
 type: string
 ports:
-description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
 items:
 description: ContainerPort represents a network port in a single container.
 properties:
@@ -4190,16 +4203,28 @@ spec:
 description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
 type: object
 type: object
+matchLabelKeys:
+description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.
+items:
+type: string
+type: array
+x-kubernetes-list-type: atomic
 maxSkew:
 description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
 format: int32
 type: integer
 minDomains:
-description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate."
+description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)."
 format: int32
 type: integer
+nodeAffinityPolicy:
+description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
+type: string
+nodeTaintsPolicy:
+description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
+type: string
 topologyKey:
-description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
+description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
 type: string
 whenUnsatisfiable:
 description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
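Taken together, the new `matchLabelKeys`, `nodeAffinityPolicy`, and `nodeTaintsPolicy` fields extend the existing topology spread constraint schema. A sketch of a constraint using them; the label names are illustrative, and the two policy fields are feature-gated as the descriptions above note:

```yaml
topologySpreadConstraints:
- maxSkew: 1
  topologyKey: topology.kubernetes.io/zone
  whenUnsatisfiable: DoNotSchedule
  labelSelector:
    matchLabels:
      app: example-runner          # hypothetical label
  matchLabelKeys:                  # ANDed with labelSelector, using the incoming pod's own label values
  - pod-template-hash
  nodeAffinityPolicy: Honor        # count only nodes matching nodeAffinity/nodeSelector
  nodeTaintsPolicy: Ignore         # ignore node taints when counting domains
```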
@@ -24,12 +24,18 @@ spec:
 - jsonPath: .spec.repository
 name: Repository
 type: string
+- jsonPath: .spec.group
+name: Group
+type: string
 - jsonPath: .spec.labels
 name: Labels
 type: string
 - jsonPath: .status.phase
 name: Status
 type: string
+- jsonPath: .status.message
+name: Message
+type: string
 - jsonPath: .metadata.creationTimestamp
 name: Age
 type: date
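Reassembled with indentation, the printer-column list after this change would read roughly as below (`additionalPrinterColumns` is the standard CRD location for these entries):

```yaml
additionalPrinterColumns:
- jsonPath: .spec.repository
  name: Repository
  type: string
- jsonPath: .spec.group          # new column
  name: Group
  type: string
- jsonPath: .spec.labels
  name: Labels
  type: string
- jsonPath: .status.phase
  name: Status
  type: string
- jsonPath: .status.message     # new column
  name: Message
  type: string
- jsonPath: .metadata.creationTimestamp
  name: Age
  type: date
```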
@@ -884,7 +890,7 @@ spec:
 description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
 type: string
 ports:
-description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
 items:
 description: ContainerPort represents a network port in a single container.
 properties:
@@ -1319,6 +1325,9 @@ spec:
 type: string
 type: array
 type: object
+dnsPolicy:
+description: DNSPolicy defines how a pod's DNS will be configured.
+type: string
 dockerEnabled:
 type: boolean
 dockerEnv:
@@ -1573,7 +1582,7 @@ spec:
 type: boolean
 ephemeralContainers:
 items:
-description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. \n This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate."
+description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted."
 properties:
 args:
 description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
@@ -2353,6 +2362,16 @@ spec:
 - name
 type: object
 type: array
+githubAPICredentialsFrom:
+properties:
+secretRef:
+properties:
+name:
+type: string
+required:
+- name
+type: object
+type: object
 group:
 type: string
 hostAliases:
@@ -2753,7 +2772,7 @@ spec:
 description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
 type: string
 ports:
-description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
 items:
 description: ContainerPort represents a network port in a single container.
 properties:
@@ -3663,7 +3682,7 @@ spec:
 description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
 type: string
 ports:
-description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
 items:
 description: ContainerPort represents a network port in a single container.
 properties:
@@ -4131,16 +4150,28 @@ spec:
 description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
 type: object
 type: object
+matchLabelKeys:
+description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.
+items:
+type: string
+type: array
+x-kubernetes-list-type: atomic
 maxSkew:
 description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
 format: int32
 type: integer
 minDomains:
-description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate."
+description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)."
 format: int32
 type: integer
+nodeAffinityPolicy:
+description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
+type: string
+nodeTaintsPolicy:
+description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
+type: string
 topologyKey:
-description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
+description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
 type: string
 whenUnsatisfiable:
 description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
@@ -67,6 +67,16 @@ spec:
 type: string
 ephemeral:
 type: boolean
+githubAPICredentialsFrom:
+properties:
+secretRef:
+properties:
+name:
+type: string
+required:
+- name
+type: object
+type: object
 group:
 type: string
 image:
@@ -76,7 +86,7 @@ spec:
 type: string
 type: array
 minReadySeconds:
-description: Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
+description: Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)
 format: int32
 type: integer
 organization:
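With the feature-gate caveat dropped from the description, `minReadySeconds` is used as in the core workload APIs. A minimal sketch; the values and placement alongside `replicas` are illustrative:

```yaml
spec:
  replicas: 2
  # A newly created pod must stay ready this long, with no container crashes,
  # before it is counted as available (0 by default).
  minReadySeconds: 10
```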
@@ -1006,7 +1016,7 @@ spec:
 description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
 type: string
 ports:
-description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
 items:
 description: ContainerPort represents a network port in a single container.
 properties:
@@ -1448,9 +1458,9 @@ spec:
 description: 'EnableServiceLinks indicates whether information about services should be injected into pod''s environment variables, matching the syntax of Docker links. Optional: Defaults to true.'
 type: boolean
 ephemeralContainers:
-description: List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.
+description: List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
 items:
-description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. \n This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate."
+description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted."
 properties:
 args:
 description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
@@ -2254,6 +2264,9 @@ spec:
 hostPID:
 description: 'Use the host''s pid namespace. Optional: Default to false.'
 type: boolean
+hostUsers:
+description: 'Use the host''s user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.'
+type: boolean
 hostname:
 description: Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.
 type: string
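Per the new description, `hostUsers: false` requests a fresh user namespace for the pod. A minimal sketch; the container name and image are placeholders, and the description notes the field is alpha-level behind the UserNamespacesSupport feature gate:

```yaml
spec:
  # false: create a new user namespace for the pod instead of using the host's;
  # per the description above, this helps mitigate container-breakout vulnerabilities.
  hostUsers: false
  containers:
  - name: runner
    image: example/runner:latest   # placeholder image
```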
@@ -2638,7 +2651,7 @@ spec:
 description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
 type: string
 ports:
-description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
+description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
 items:
 description: ContainerPort represents a network port in a single container.
 properties:
@@ -3057,7 +3070,7 @@ spec:
 type: object
 x-kubernetes-map-type: atomic
 os:
-description: "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. \n If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions \n If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup This is a beta field and requires the IdentifyPodOS feature"
+description: "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. \n If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions \n If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup"
 properties:
 name:
 description: 'Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null'
@@ -3270,16 +3283,28 @@ spec:
 description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
 type: object
 type: object
+matchLabelKeys:
+description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.
+items:
+type: string
+type: array
+x-kubernetes-list-type: atomic
 maxSkew:
 description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
 format: int32
 type: integer
 minDomains:
-description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate."
+description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)."
 format: int32
 type: integer
+nodeAffinityPolicy:
+description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
+type: string
+nodeTaintsPolicy:
+description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
+type: string
 topologyKey:
-description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
+description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
 type: string
 whenUnsatisfiable:
 description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
@@ -258,3 +258,27 @@ rules:
   - get
   - list
   - watch
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - create
+  - delete
+  - get
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - rolebindings
+  verbs:
+  - create
+  - delete
+  - get
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - roles
+  verbs:
+  - create
+  - delete
+  - get

contrib/README.md (new file, 6 lines)
@@ -0,0 +1,6 @@
The `contrib` directory is the place for sharing various example code for deploying and operating `actions-runner-controller`.

Anything contained in this directory is provided as-is. The maintainers of `actions-runner-controller` are not yet committed to providing
full support for using, fixing, and enhancing it. However, they will do their best to collect feedback from early adopters and advanced users like you, and may eventually consider graduating any of the examples into an official addition to the project.

See https://github.com/actions-runner-controller/actions-runner-controller/pull/1375#issuecomment-1258816470 and https://github.com/actions-runner-controller/actions-runner-controller/pull/1559#issuecomment-1258827496 for more context.

contrib/examples/actions-runner/.helmignore (new file, 25 lines)
@@ -0,0 +1,25 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
# Docs
docs/

contrib/examples/actions-runner/Chart.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
apiVersion: v2
name: actions-runner
description: Helm Chart for GitHub Actions Runner
type: application
version: 0.0.1
appVersion: 2.290.1

home: https://github.com/actions-runner-controller/actions-runner-controller/tree/master/runner
sources:
  - https://github.com/actions-runner-controller/actions-runner-controller/tree/master/runner

contrib/examples/actions-runner/README.md (new file, 36 lines)
@@ -0,0 +1,36 @@
## Docs

All additional docs are kept in the `docs/` folder; this README is solely for documenting the values.yaml keys and values.

## Values

**_The values are documented as of HEAD. To review the configuration options for your chart version, view this file at the relevant [tag](https://github.com/actions-runner-controller/actions-runner-controller/tags)._**

> _Default values are the defaults set in the chart's values.yaml; some properties have default configurations in the code for when the property is omitted or invalid._

| Key | Description | Default |
|-----|-------------|---------|
| `labels` | Set labels to apply to all resources in the chart | |
| `replicaCount` | Set the number of runner pods | 1 |
| `image.repository` | The "repository/image" of the runner container | summerwind/actions-runner |
| `image.tag` | The tag of the runner container | |
| `image.pullPolicy` | The pull policy of the runner image | IfNotPresent |
| `imagePullSecrets` | Specifies the secret to be used when pulling the runner pod containers | |
| `fullnameOverride` | Override the full resource names | |
| `nameOverride` | Override the resource name prefix | |
| `podAnnotations` | Set annotations for the runner pod | |
| `podLabels` | Set labels for the runner pod | |
| `podSecurityContext` | Set the security context for the runner pod | |
| `nodeSelector` | Set the pod nodeSelector | |
| `affinity` | Set the runner pod affinity rules | |
| `tolerations` | Set the runner pod tolerations | |
| `env` | Set environment variables for the runner container | |
| `organization` | GitHub organization where the runner will be registered | test |
| `repository` | GitHub repository where the runner will be registered | |
| `runnerLabels` | Labels you want to add to your runner | test |
| `autoscaler.enabled` | Enable the HorizontalRunnerAutoscaler; when enabled, the replica count is not used | true |
| `autoscaler.minReplicas` | Minimum number of replicas | 1 |
| `autoscaler.maxReplicas` | Maximum number of replicas | 5 |
| `autoscaler.scaleDownDelaySecondsAfterScaleOut` | [Anti-flapping configuration](https://github.com/actions-runner-controller/actions-runner-controller#anti-flapping-configuration) | 120 |
| `autoscaler.metrics` | [Pull driven scaling](https://github.com/actions-runner-controller/actions-runner-controller#pull-driven-scaling) | default |
| `autoscaler.scaleUpTriggers` | [Webhook driven scaling](https://github.com/actions-runner-controller/actions-runner-controller#webhook-driven-scaling) | |

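As a usage sketch for the values documented above (the organization and label values are illustrative, not chart defaults), an override file for this example chart could look like:

# my-values.yaml - illustrative overrides for contrib/examples/actions-runner
organization: my-github-org   # register runners under this GitHub organization
runnerLabels:
  - linux                     # custom runner label, referenced from workflows via runs-on
autoscaler:
  enabled: true               # replicaCount is ignored while this is true
  minReplicas: 1
  maxReplicas: 5
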
contrib/examples/actions-runner/templates/_helpers.tpl (new file, 54 lines)
@@ -0,0 +1,54 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "actions-runner.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "actions-runner.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "actions-runner.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "actions-runner.labels" -}}
helm.sh/chart: {{ include "actions-runner.chart" . }}
{{ include "actions-runner.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- range $k, $v := .Values.labels }}
{{ $k }}: {{ $v }}
{{- end }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "actions-runner.selectorLabels" -}}
app.kubernetes.io/name: {{ include "actions-runner.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

contrib/examples/actions-runner/templates/deployment.yaml (new file, 96 lines; indentation reconstructed from the nindent hints)
@@ -0,0 +1,96 @@
apiVersion: v1
kind: List
items:
- apiVersion: actions.summerwind.dev/v1alpha1
  kind: RunnerDeployment
  metadata:
    name: {{ include "actions-runner.fullname" . }}
    namespace: {{ .Release.Namespace }}
    labels:
      {{- include "actions-runner.labels" . | nindent 6 }}
  spec:
    {{- if not .Values.autoscaler.enabled }}
    replicas: {{ .Values.replicaCount }}
    {{- end }}
    selector:
      matchLabels:
        {{- include "actions-runner.selectorLabels" . | nindent 8 }}
    template:
      metadata:
        {{- with .Values.podAnnotations }}
        annotations:
          kubectl.kubernetes.io/default-logs-container: "runner"
          {{- toYaml . | nindent 10 }}
        {{- end }}
        labels:
          {{- include "actions-runner.selectorLabels" . | nindent 10 }}
          {{- with .Values.podLabels }}
          {{- toYaml . | nindent 10 }}
          {{- end }}
      spec:
        {{- with .Values.imagePullSecrets }}
        imagePullSecrets:
          {{- toYaml . | nindent 10 }}
        {{- end }}
        securityContext:
          {{- toYaml .Values.podSecurityContext | nindent 10 }}
        {{- with .Values.priorityClassName }}
        priorityClassName: "{{ . }}"
        {{- end }}
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        {{- if .Values.organization }}
        organization: {{ .Values.organization }}
        {{- end }}
        {{- if .Values.repository }}
        repository: {{ .Values.repository }}
        {{- end }}
        group: {{ .Values.group | default "Default" }}
        {{- with .Values.runnerLabels }}
        labels:
          {{- toYaml . | nindent 10 }}
        {{- end }}
        {{- with .Values.nodeSelector }}
        nodeSelector:
          {{- toYaml . | nindent 10 }}
        {{- end }}
        {{- with .Values.affinity }}
        affinity:
          {{- toYaml . | nindent 10 }}
        {{- end }}
        {{- with .Values.tolerations }}
        tolerations:
          {{- toYaml . | nindent 10 }}
        {{- end }}
        {{- if .Values.env }}
        env:
        {{- range $key, $val := .Values.env }}
          - name: {{ $key }}
            value: {{ $val | quote }}
        {{- end }}
        {{- end }}
{{- if .Values.autoscaler.enabled }}
- apiVersion: actions.summerwind.dev/v1alpha1
  kind: HorizontalRunnerAutoscaler
  metadata:
    name: {{ (list (include "actions-runner.fullname" .) "autoscaler" | join "-") }}
    namespace: {{ .Release.Namespace }}
    labels:
      {{- include "actions-runner.labels" . | nindent 6 }}
  spec:
    {{- if .Values.autoscaler.scaleDownDelaySecondsAfterScaleOut }}
    scaleDownDelaySecondsAfterScaleOut: {{ .Values.autoscaler.scaleDownDelaySecondsAfterScaleOut }}
    {{- end }}
    scaleTargetRef:
      name: {{ include "actions-runner.fullname" . }}
    minReplicas: {{ .Values.autoscaler.minReplicas }}
    maxReplicas: {{ .Values.autoscaler.maxReplicas }}
    {{- with .Values.autoscaler.scaleUpTriggers }}
    scaleUpTriggers:
      {{- toYaml . | nindent 4 }}
    {{- end }}
    {{- with .Values.autoscaler.metrics }}
    metrics:
      {{- toYaml . | nindent 4 }}
    {{- end }}
{{- end }}

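For illustration, with the Chart.yaml above (appVersion 2.290.1) and image.tag left empty, the default expression `cat "v" .Chart.AppVersion | replace " " ""` first yields the string "v 2.290.1" and then strips the space, so the rendered field would become:

image: "summerwind/actions-runner:v2.290.1"

The example values.yaml below pins image.tag explicitly, so this fallback only applies when the tag is omitted.
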
contrib/examples/actions-runner/values.yaml (new file, 63 lines)
@@ -0,0 +1,63 @@
image:
  repository: summerwind/actions-runner
  tag: v2.290.1-ubuntu-20.04
  pullPolicy: IfNotPresent

# Create runners for an organization or a repository.
# Set only one of the two: organization or repository.
# By default, runners are created under the GitHub organization "test".
organization: test
# repository: mumoshu/actions-runner-controller-ci

# Labels you want to add to your runners
runnerLabels:
  - test

# Ignored when the autoscaler is enabled
replicaCount: 1

# The Runner Group that the runner(s) should be associated with.
# See https://docs.github.com/en/github-ae@latest/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups.
group: Default

autoscaler:
  enabled: true
  minReplicas: 1
  maxReplicas: 5
  scaleDownDelaySecondsAfterScaleOut: 120
  # metrics (pull method) / scaleUpTriggers (push method)
  # https://github.com/actions-runner-controller/actions-runner-controller#pull-driven-scaling
  # https://github.com/actions-runner-controller/actions-runner-controller#webhook-driven-scaling
  metrics:
    - type: PercentageRunnersBusy
      scaleUpThreshold: '0.75'
      scaleDownThreshold: '0.25'
      scaleUpFactor: '2'
      scaleDownFactor: '0.5'
  # scaleUpTriggers:
  # - githubEvent: {}
  #   duration: "5m"

podAnnotations: {}

podLabels: {}

imagePullSecrets: []

podSecurityContext:
  {}
  # fsGroup: 2000

# Leverage a PriorityClass to ensure your pods survive resource shortages
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
# PriorityClass: system-cluster-critical
priorityClassName: ""

nodeSelector: {}

tolerations: []

affinity: {}

env:
  {}

contrib/examples/terraform/actions-runner-controller.tf (new file, 60 lines)
@@ -0,0 +1,60 @@
### Deploying with exposed github token

resource "kubernetes_namespace" "arc" {
  metadata {
    name = "actions-runner-system"
  }
}

resource "helm_release" "actions-runner-controller" {
  count            = var.actions_runner_controller
  name             = "actions-runner-controller"
  namespace        = kubernetes_namespace.arc.metadata[0].name
  create_namespace = true
  chart            = "actions-runner-controller"
  repository       = "https://actions-runner-controller.github.io/actions-runner-controller"
  version          = "v0.19.1"
  values = [<<EOF
authSecret:
  github_token: hdjasyd7das7d7asd78as87dasdas
  create: true
EOF
  ]
  depends_on = [resource.helm_release.cm]
}

#============================================================================================================================================
### Deploying with secret manager like AWS's
# make sure the name of the secret is the same as secret_id

data "aws_secretsmanager_secret_version" "creds" {
  secret_id = "github/access_token"
}
locals {
  github_creds = jsondecode(
    data.aws_secretsmanager_secret_version.creds.secret_string
  )
}

resource "kubernetes_namespace" "arc" {
  metadata {
    name = "actions-runner-system"
  }
}

resource "helm_release" "actions-runner-controller" {
  count            = var.actions_runner_controller
  name             = "actions-runner-controller"
  namespace        = kubernetes_namespace.arc.metadata[0].name
  create_namespace = true
  chart            = "actions-runner-controller"
  repository       = "https://actions-runner-controller.github.io/actions-runner-controller"
  version          = "v0.19.1"
  values = [<<EOF
authSecret:
  github_token: ${local.github_creds.github_token}
  create: true
EOF
  ]
  depends_on = [resource.helm_release.cm]
}

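Both examples above use var.actions_runner_controller as the count of the helm_release without defining it, so a variable definition along these lines is assumed to accompany them (the name matches the examples; the type and default are illustrative):

# variables.tf - assumed companion to the examples above; 1 deploys the releases, 0 skips them
variable "actions_runner_controller" {
  description = "Set to 1 to deploy actions-runner-controller and cert-manager, 0 to skip"
  type        = number
  default     = 1
}
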
contrib/examples/terraform/cert-manager.tf (new file, 27 lines)
@@ -0,0 +1,27 @@
# cert-manager must be deployed or included via the deployment process

resource "kubernetes_namespace" "cm" {
  metadata {
    name = "cert-manager"
  }
}

resource "helm_release" "cm" {
  count            = var.actions_runner_controller
  name             = "cm"
  namespace        = kubernetes_namespace.cm.metadata[0].name
  create_namespace = true
  chart            = "cert-manager"
  repository       = "https://charts.jetstack.io"
  version          = "v1.8.0"
  values = [<<EOF
global:
  podSecurityPolicy:
    enabled: true
    useAppArmor: true
prometheus:
  enabled: false
installCRDs: true
EOF
  ]
}

@@ -9,7 +9,9 @@ import (
 	"strings"

 	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
-	"github.com/google/go-github/v45/github"
+	prometheus_metrics "github.com/actions-runner-controller/actions-runner-controller/controllers/metrics"
+	arcgithub "github.com/actions-runner-controller/actions-runner-controller/github"
+	"github.com/google/go-github/v47/github"
 	corev1 "k8s.io/api/core/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -21,7 +23,7 @@ const (
 	defaultScaleDownFactor = 0.7
 )

-func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
+func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(ghc *arcgithub.Client, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
 	if hra.Spec.MinReplicas == nil {
 		return nil, fmt.Errorf("horizontalrunnerautoscaler %s/%s is missing minReplicas", hra.Namespace, hra.Name)
 	} else if hra.Spec.MaxReplicas == nil {
@@ -48,9 +50,9 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTa

 	switch primaryMetricType {
 	case v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns:
-		suggested, err = r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, &primaryMetric)
+		suggested, err = r.suggestReplicasByQueuedAndInProgressWorkflowRuns(ghc, st, hra, &primaryMetric)
 	case v1alpha1.AutoscalingMetricTypePercentageRunnersBusy:
-		suggested, err = r.suggestReplicasByPercentageRunnersBusy(st, hra, primaryMetric)
+		suggested, err = r.suggestReplicasByPercentageRunnersBusy(ghc, st, hra, primaryMetric)
 	default:
 		return nil, fmt.Errorf("validating autoscaling metrics: unsupported metric type %q", primaryMetric)
 	}
@@ -83,11 +85,10 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTa
 		)
 	}

-	return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, &fallbackMetric)
+	return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(ghc, st, hra, &fallbackMetric)
 }

-func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgressWorkflowRuns(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics *v1alpha1.MetricSpec) (*int, error) {
-
+func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgressWorkflowRuns(ghc *arcgithub.Client, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics *v1alpha1.MetricSpec) (*int, error) {
 	var repos [][]string
 	repoID := st.repo
 	if repoID == "" {
@@ -126,7 +127,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
 		opt := github.ListWorkflowJobsOptions{ListOptions: github.ListOptions{PerPage: 50}}
 		var allJobs []*github.WorkflowJob
 		for {
-			jobs, resp, err := r.GitHubClient.Actions.ListWorkflowJobs(context.TODO(), user, repoName, runID, &opt)
+			jobs, resp, err := ghc.Actions.ListWorkflowJobs(context.TODO(), user, repoName, runID, &opt)
 			if err != nil {
 				r.Log.Error(err, "Error listing workflow jobs")
 				return //err
@@ -184,7 +185,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr

 	for _, repo := range repos {
 		user, repoName := repo[0], repo[1]
-		workflowRuns, err := r.GitHubClient.ListRepositoryWorkflowRuns(context.TODO(), user, repoName)
+		workflowRuns, err := ghc.ListRepositoryWorkflowRuns(context.TODO(), user, repoName)
 		if err != nil {
 			return nil, err
 		}
@@ -211,6 +212,20 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr

 	necessaryReplicas := queued + inProgress

+	prometheus_metrics.SetHorizontalRunnerAutoscalerQueuedAndInProgressWorkflowRuns(
+		hra.ObjectMeta,
+		st.enterprise,
+		st.org,
+		st.repo,
+		st.kind,
+		st.st,
+		necessaryReplicas,
+		completed,
+		inProgress,
+		queued,
+		unknown,
+	)
+
 	r.Log.V(1).Info(
 		fmt.Sprintf("Suggested desired replicas of %d by TotalNumberOfQueuedAndInProgressWorkflowRuns", necessaryReplicas),
 		"workflow_runs_completed", completed,
@@ -226,7 +241,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
 	return &necessaryReplicas, nil
 }

-func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunnersBusy(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics v1alpha1.MetricSpec) (*int, error) {
+func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunnersBusy(ghc *arcgithub.Client, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics v1alpha1.MetricSpec) (*int, error) {
 	ctx := context.Background()
 	scaleUpThreshold := defaultScaleUpThreshold
 	scaleDownThreshold := defaultScaleDownThreshold
@@ -295,7 +310,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
 	)

 	// ListRunners will return all runners managed by GitHub - not restricted to ns
-	runners, err := r.GitHubClient.ListRunners(
+	runners, err := ghc.ListRunners(
 		ctx,
 		enterprise,
 		organization,
@@ -382,6 +397,19 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
 	//
 	// - num_runners can be as twice as large as replicas_desired_before while
 	// the runnerdeployment controller is replacing RunnerReplicaSet for runner update.
+	prometheus_metrics.SetHorizontalRunnerAutoscalerPercentageRunnersBusy(
+		hra.ObjectMeta,
+		st.enterprise,
+		st.org,
+		st.repo,
+		st.kind,
+		st.st,
+		desiredReplicas,
+		numRunners,
+		numRunnersRegistered,
+		numRunnersBusy,
+		numTerminatingBusy,
+	)
 	r.Log.V(1).Info(
 		fmt.Sprintf("Suggested desired replicas of %d by PercentageRunnersBusy", desiredReplicas),

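Taken together, these hunks replace reads of the reconciler-wide r.GitHubClient with an explicit per-call *arcgithub.Client parameter. A minimal sketch of the resulting call pattern, mirroring the controller hunk further below (error handling trimmed):

// ghc is resolved once per reconciliation from the HRA's optional credentials secret,
// then threaded through every GitHub API call made while computing replicas.
ghc, err := r.GitHubClient.InitForHRA(context.Background(), &hra)
if err != nil {
	return ctrl.Result{}, err
}
newDesiredReplicas, err := r.computeReplicasWithCache(ghc, log, now, st, hra, minReplicas)
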
@@ -330,7 +330,6 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {

 			h := &HorizontalRunnerAutoscalerReconciler{
 				Log:                   log,
-				GitHubClient:          client,
 				Scheme:                scheme,
 				DefaultScaleDownDelay: DefaultScaleDownDelay,
 			}
@@ -379,7 +378,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {

 			st := h.scaleTargetFromRD(context.Background(), rd)

-			got, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
+			got, err := h.computeReplicasWithCache(client, log, metav1Now.Time, st, hra, minReplicas)
 			if err != nil {
 				if tc.err == "" {
 					t.Fatalf("unexpected error: expected none, got %v", err)
@@ -720,7 +719,6 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			h := &HorizontalRunnerAutoscalerReconciler{
 				Log:                   log,
 				Scheme:                scheme,
-				GitHubClient:          client,
 				DefaultScaleDownDelay: DefaultScaleDownDelay,
 			}

@@ -781,7 +779,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {

 			st := h.scaleTargetFromRD(context.Background(), rd)

-			got, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
+			got, err := h.computeReplicasWithCache(client, log, metav1Now.Time, st, hra, minReplicas)
 			if err != nil {
 				if tc.err == "" {
 					t.Fatalf("unexpected error: expected none, got %v", err)

@@ -79,7 +79,6 @@ func (s *batchScaler) Add(st *ScaleTarget) {
 			for {
 				select {
 				case <-after:
-					after = nil
 					break batch
 				case st := <-s.queue:
 					nsName := types.NamespacedName{

@@ -20,7 +20,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"strings"
 	"sync"
@@ -30,7 +30,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"

 	"github.com/go-logr/logr"
-	gogithub "github.com/google/go-github/v45/github"
+	gogithub "github.com/google/go-github/v47/github"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/tools/record"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -75,10 +75,8 @@ type HorizontalRunnerAutoscalerGitHubWebhook struct {
 	// A scale target is enqueued on each retrieval of each eligible webhook event, so that it is processed asynchronously.
 	QueueLimit int

 	worker     *worker
 	workerInit sync.Once
-	workerStart sync.Once
-	batchCh     chan *ScaleTarget
 }

 func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(_ context.Context, request reconcile.Request) (reconcile.Result, error) {
@@ -133,7 +131,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
 			return
 		}
 	} else {
-		payload, err = ioutil.ReadAll(r.Body)
+		payload, err = io.ReadAll(r.Body)
 		if err != nil {
 			autoscaler.Log.Error(err, "error reading request body")

@@ -3,7 +3,7 @@ package controllers
 import (
 	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
 	"github.com/actions-runner-controller/actions-runner-controller/pkg/actionsglob"
-	"github.com/google/go-github/v45/github"
+	"github.com/google/go-github/v47/github"
 )

 func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchCheckRunEvent(event *github.CheckRunEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
@@ -2,7 +2,7 @@ package controllers

 import (
 	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
-	"github.com/google/go-github/v45/github"
+	"github.com/google/go-github/v47/github"
 )

 func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPullRequestEvent(event *github.PullRequestEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
@@ -2,7 +2,7 @@ package controllers

 import (
 	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
-	"github.com/google/go-github/v45/github"
+	"github.com/google/go-github/v47/github"
 )

 func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPushEvent(event *github.PushEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {

@@ -5,7 +5,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
@@ -15,7 +14,7 @@ import (

 	actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
 	"github.com/go-logr/logr"
-	"github.com/google/go-github/v45/github"
+	"github.com/google/go-github/v47/github"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
@@ -504,7 +503,7 @@ func testServerWithInitObjs(t *testing.T, eventType string, event interface{}, w

 	hraWebhook := &HorizontalRunnerAutoscalerGitHubWebhook{}

-	client := fake.NewFakeClientWithScheme(sc, initObjs...)
+	client := fake.NewClientBuilder().WithScheme(sc).WithRuntimeObjects(initObjs...).Build()

 	logs := installTestLogger(hraWebhook)

@@ -537,7 +536,7 @@ func testServerWithInitObjs(t *testing.T, eventType string, event interface{}, w
 		t.Error("status:", resp.StatusCode)
 	}

-	respBody, err := ioutil.ReadAll(resp.Body)
+	respBody, err := io.ReadAll(resp.Body)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -575,7 +574,7 @@ func sendWebhook(server *httptest.Server, eventType string, event interface{}) (
 			"X-GitHub-Event": {eventType},
 			"Content-Type":   {"application/json"},
 		},
-		Body: ioutil.NopCloser(bytes.NewBuffer(reqBody)),
+		Body: io.NopCloser(bytes.NewBuffer(reqBody)),
 	}

 	return http.DefaultClient.Do(req)
@@ -607,7 +606,7 @@ func (l *testLogSink) Info(_ int, msg string, kvs ...interface{}) {
 	fmt.Fprintf(l.writer, "\n")
 }

-func (_ *testLogSink) Enabled(level int) bool {
+func (*testLogSink) Enabled(level int) bool {
 	return true
 }

@@ -24,7 +24,6 @@ import (

 	corev1 "k8s.io/api/core/v1"

-	"github.com/actions-runner-controller/actions-runner-controller/github"
 	"github.com/go-logr/logr"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/types"
@@ -38,6 +37,7 @@ import (

 	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
 	"github.com/actions-runner-controller/actions-runner-controller/controllers/metrics"
+	arcgithub "github.com/actions-runner-controller/actions-runner-controller/github"
 )

 const (
@@ -47,11 +47,10 @@ const (
 // HorizontalRunnerAutoscalerReconciler reconciles a HorizontalRunnerAutoscaler object
 type HorizontalRunnerAutoscalerReconciler struct {
 	client.Client
-	GitHubClient *github.Client
+	GitHubClient *MultiGitHubClient
 	Log          logr.Logger
 	Recorder     record.EventRecorder
 	Scheme       *runtime.Scheme
-	CacheDuration         time.Duration
 	DefaultScaleDownDelay time.Duration
 	Name                  string
 }
@@ -73,6 +72,8 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
 	}

 	if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
+		r.GitHubClient.DeinitForHRA(&hra)
+
 		return ctrl.Result{}, nil
 	}

@@ -310,7 +311,12 @@ func (r *HorizontalRunnerAutoscalerReconciler) reconcile(ctx context.Context, re
 		return ctrl.Result{}, err
 	}

-	newDesiredReplicas, err := r.computeReplicasWithCache(log, now, st, hra, minReplicas)
+	ghc, err := r.GitHubClient.InitForHRA(context.Background(), &hra)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	newDesiredReplicas, err := r.computeReplicasWithCache(ghc, log, now, st, hra, minReplicas)
 	if err != nil {
 		r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())

@@ -461,10 +467,10 @@ func (r *HorizontalRunnerAutoscalerReconciler) getMinReplicas(log logr.Logger, n
 	return minReplicas, active, upcoming, nil
 }

-func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(log logr.Logger, now time.Time, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, minReplicas int) (int, error) {
+func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(ghc *arcgithub.Client, log logr.Logger, now time.Time, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, minReplicas int) (int, error) {
 	var suggestedReplicas int

-	v, err := r.suggestDesiredReplicas(st, hra)
+	v, err := r.suggestDesiredReplicas(ghc, st, hra)
 	if err != nil {
 		return 0, err
 	}

@@ -8,7 +8,7 @@ import (
 	"time"

 	github2 "github.com/actions-runner-controller/actions-runner-controller/github"
-	"github.com/google/go-github/v45/github"
+	"github.com/google/go-github/v47/github"

 	"github.com/actions-runner-controller/actions-runner-controller/github/fake"

@@ -99,12 +99,14 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
 		return fmt.Sprintf("%s%s", ns.Name, name)
 	}

+	multiClient := NewMultiGitHubClient(mgr.GetClient(), env.ghClient)
+
 	runnerController := &RunnerReconciler{
 		Client:       mgr.GetClient(),
 		Scheme:       scheme.Scheme,
 		Log:          logf.Log,
 		Recorder:     mgr.GetEventRecorderFor("runnerreplicaset-controller"),
-		GitHubClient: env.ghClient,
+		GitHubClient: multiClient,
 		RunnerImage:  "example/runner:test",
 		DockerImage:  "example/docker:test",
 		Name:         controllerName("runner"),
@@ -116,12 +118,11 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
 	Expect(err).NotTo(HaveOccurred(), "failed to setup runner controller")

 	replicasetController := &RunnerReplicaSetReconciler{
 		Client:       mgr.GetClient(),
 		Scheme:       scheme.Scheme,
 		Log:          logf.Log,
 		Recorder:     mgr.GetEventRecorderFor("runnerreplicaset-controller"),
-		GitHubClient: env.ghClient,
 		Name:         controllerName("runnerreplicaset"),
 	}
 	err = replicasetController.SetupWithManager(mgr)
 	Expect(err).NotTo(HaveOccurred(), "failed to setup runnerreplicaset controller")
@@ -137,13 +138,12 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
 	Expect(err).NotTo(HaveOccurred(), "failed to setup runnerdeployment controller")

 	autoscalerController := &HorizontalRunnerAutoscalerReconciler{
 		Client:        mgr.GetClient(),
 		Scheme:        scheme.Scheme,
 		Log:           logf.Log,
-		GitHubClient:  env.ghClient,
+		GitHubClient:  multiClient,
 		Recorder:      mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller"),
-		CacheDuration: 1 * time.Second,
 		Name:          controllerName("horizontalrunnerautoscaler"),
 	}
 	err = autoscalerController.SetupWithManager(mgr)
 	Expect(err).NotTo(HaveOccurred(), "failed to setup autoscaler controller")

@@ -7,8 +7,13 @@ import (
 )

 const (
 	hraName      = "horizontalrunnerautoscaler"
 	hraNamespace = "namespace"
+	stEnterprise   = "enterprise"
+	stOrganization = "organization"
+	stRepository   = "repository"
+	stKind         = "kind"
+	stName         = "name"
 )

 var (
@@ -16,6 +21,16 @@ var (
 		horizontalRunnerAutoscalerMinReplicas,
 		horizontalRunnerAutoscalerMaxReplicas,
 		horizontalRunnerAutoscalerDesiredReplicas,
+		horizontalRunnerAutoscalerReplicasDesired,
+		horizontalRunnerAutoscalerRunners,
+		horizontalRunnerAutoscalerRunnersRegistered,
+		horizontalRunnerAutoscalerRunnersBusy,
+		horizontalRunnerAutoscalerTerminatingBusy,
+		horizontalRunnerAutoscalerNecessaryReplicas,
+		horizontalRunnerAutoscalerWorkflowRunsCompleted,
+		horizontalRunnerAutoscalerWorkflowRunsInProgress,
+		horizontalRunnerAutoscalerWorkflowRunsQueued,
+		horizontalRunnerAutoscalerWorkflowRunsUnknown,
 	}
 )

@@ -41,6 +56,78 @@ var (
 		},
 		[]string{hraName, hraNamespace},
 	)
+	// PercentageRunnersBusy
+	horizontalRunnerAutoscalerReplicasDesired = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: "horizontalrunnerautoscaler_replicas_desired",
+			Help: "replicas_desired of PercentageRunnersBusy",
+		},
+		[]string{hraName, hraNamespace, stEnterprise, stOrganization, stRepository, stKind, stName},
+	)
+	horizontalRunnerAutoscalerRunners = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: "horizontalrunnerautoscaler_runners",
+			Help: "num_runners of PercentageRunnersBusy",
+		},
+		[]string{hraName, hraNamespace, stEnterprise, stOrganization, stRepository, stKind, stName},
+	)
+	horizontalRunnerAutoscalerRunnersRegistered = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: "horizontalrunnerautoscaler_runners_registered",
+			Help: "num_runners_registered of PercentageRunnersBusy",
+		},
+		[]string{hraName, hraNamespace, stEnterprise, stOrganization, stRepository, stKind, stName},
+	)
+	horizontalRunnerAutoscalerRunnersBusy = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: "horizontalrunnerautoscaler_runners_busy",
+			Help: "num_runners_busy of PercentageRunnersBusy",
+		},
+		[]string{hraName, hraNamespace, stEnterprise, stOrganization, stRepository, stKind, stName},
+	)
+	horizontalRunnerAutoscalerTerminatingBusy = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: "horizontalrunnerautoscaler_terminating_busy",
+			Help: "num_terminating_busy of PercentageRunnersBusy",
+		},
+		[]string{hraName, hraNamespace, stEnterprise, stOrganization, stRepository, stKind, stName},
+	)
+	// QueuedAndInProgressWorkflowRuns
+	horizontalRunnerAutoscalerNecessaryReplicas = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: "horizontalrunnerautoscaler_necessary_replicas",
+			Help: "necessary_replicas of QueuedAndInProgressWorkflowRuns",
+		},
+		[]string{hraName, hraNamespace, stEnterprise, stOrganization, stRepository, stKind, stName},
+	)
+	horizontalRunnerAutoscalerWorkflowRunsCompleted = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: "horizontalrunnerautoscaler_workflow_runs_completed",
+			Help: "workflow_runs_completed of QueuedAndInProgressWorkflowRuns",
+		},
+		[]string{hraName, hraNamespace, stEnterprise, stOrganization, stRepository, stKind, stName},
+	)
+	horizontalRunnerAutoscalerWorkflowRunsInProgress = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: "horizontalrunnerautoscaler_workflow_runs_in_progress",
+			Help: "workflow_runs_in_progress of QueuedAndInProgressWorkflowRuns",
+		},
+		[]string{hraName, hraNamespace, stEnterprise, stOrganization, stRepository, stKind, stName},
+	)
+	horizontalRunnerAutoscalerWorkflowRunsQueued = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: "horizontalrunnerautoscaler_workflow_runs_queued",
+			Help: "workflow_runs_queued of QueuedAndInProgressWorkflowRuns",
+		},
+		[]string{hraName, hraNamespace, stEnterprise, stOrganization, stRepository, stKind, stName},
+	)
+	horizontalRunnerAutoscalerWorkflowRunsUnknown = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: "horizontalrunnerautoscaler_workflow_runs_unknown",
+			Help: "workflow_runs_unknown of QueuedAndInProgressWorkflowRuns",
+		},
+		[]string{hraName, hraNamespace, stEnterprise, stOrganization, stRepository, stKind, stName},
+	)
 )

 func SetHorizontalRunnerAutoscalerSpec(o metav1.ObjectMeta, spec v1alpha1.HorizontalRunnerAutoscalerSpec) {
@@ -65,3 +152,61 @@ func SetHorizontalRunnerAutoscalerStatus(o metav1.ObjectMeta, status v1alpha1.Ho
 		horizontalRunnerAutoscalerDesiredReplicas.With(labels).Set(float64(*status.DesiredReplicas))
 	}
 }
+
+func SetHorizontalRunnerAutoscalerPercentageRunnersBusy(
+	o metav1.ObjectMeta,
+	enterprise string,
+	organization string,
+	repository string,
+	kind string,
+	name string,
+	desiredReplicas int,
+	numRunners int,
+	numRunnersRegistered int,
+	numRunnersBusy int,
+	numTerminatingBusy int,
+) {
+	labels := prometheus.Labels{
+		hraName:        o.Name,
+		hraNamespace:   o.Namespace,
+		stEnterprise:   enterprise,
+		stOrganization: organization,
+		stRepository:   repository,
+		stKind:         kind,
+		stName:         name,
+	}
+	horizontalRunnerAutoscalerReplicasDesired.With(labels).Set(float64(desiredReplicas))
+	horizontalRunnerAutoscalerRunners.With(labels).Set(float64(numRunners))
+	horizontalRunnerAutoscalerRunnersRegistered.With(labels).Set(float64(numRunnersRegistered))
+	horizontalRunnerAutoscalerRunnersBusy.With(labels).Set(float64(numRunnersBusy))
+	horizontalRunnerAutoscalerTerminatingBusy.With(labels).Set(float64(numTerminatingBusy))
+}
+
+func SetHorizontalRunnerAutoscalerQueuedAndInProgressWorkflowRuns(
+	o metav1.ObjectMeta,
+	enterprise string,
+	organization string,
+	repository string,
+	kind string,
+	name string,
+	necessaryReplicas int,
+	workflowRunsCompleted int,
+	workflowRunsInProgress int,
+	workflowRunsQueued int,
+	workflowRunsUnknown int,
+) {
+	labels := prometheus.Labels{
+		hraName:        o.Name,
+		hraNamespace:   o.Namespace,
+		stEnterprise:   enterprise,
+		stOrganization: organization,
+		stRepository:   repository,
+		stKind:         kind,
+		stName:         name,
+	}
+	horizontalRunnerAutoscalerNecessaryReplicas.With(labels).Set(float64(necessaryReplicas))
+	horizontalRunnerAutoscalerWorkflowRunsCompleted.With(labels).Set(float64(workflowRunsCompleted))
+	horizontalRunnerAutoscalerWorkflowRunsInProgress.With(labels).Set(float64(workflowRunsInProgress))
+	horizontalRunnerAutoscalerWorkflowRunsQueued.With(labels).Set(float64(workflowRunsQueued))
+	horizontalRunnerAutoscalerWorkflowRunsUnknown.With(labels).Set(float64(workflowRunsUnknown))
+}

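If these collectors are registered with the controller's Prometheus registry and scraped, each gauge yields one time series per scale target, along the lines of (all label values illustrative):

horizontalrunnerautoscaler_runners_busy{horizontalrunnerautoscaler="example-hra",namespace="default",enterprise="",organization="my-org",repository="",kind="runnerdeployment",name="example-runnerdeploy"} 3
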
@@ -10,12 +10,6 @@ const (
 	rsNamespace = "namespace"
 )

-var (
-	runnerSetMetrics = []prometheus.Collector{
-		runnerSetReplicas,
-	}
-)
-
 var (
 	runnerSetReplicas = prometheus.NewGaugeVec(
 		prometheus.GaugeOpts{

334
controllers/multi_githubclient.go
Normal file
334
controllers/multi_githubclient.go
Normal file
@@ -0,0 +1,334 @@
|
|||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/sha1"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
|
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/types"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// The api creds scret annotation is added by the runner controller or the runnerset controller according to runner.spec.githubAPICredentialsFrom.secretRef.name,
|
||||||
|
// so that the runner pod controller can share the same GitHub API credentials and the instance of the GitHub API client with the upstream controllers.
|
||||||
|
annotationKeyGitHubAPICredsSecret = annotationKeyPrefix + "github-api-creds-secret"
|
||||||
|
)
|
||||||
|
|
||||||
|
type runnerOwnerRef struct {
|
||||||
|
// kind is either StatefulSet or Runner, and populated via the owner reference in the runner pod controller or via the reconcilation target's kind in
|
||||||
|
// runnerset and runner controllers.
|
||||||
|
kind string
|
||||||
|
ns, name string
|
||||||
|
}
|
||||||
|
|
||||||
|
type secretRef struct {
|
||||||
|
ns, name string
|
||||||
|
}
|
||||||
|
|
||||||
|
// savedClient is the each cache entry that contains the client for the specific set of credentials,
|
||||||
|
// like a PAT or a pair of key and cert.
|
||||||
|
// the `hash` is a part of the savedClient not the key because we are going to keep only the client for the latest creds
|
||||||
|
// in case the operator updated the k8s secret containing the credentials.
|
||||||
|
type savedClient struct {
|
||||||
|
hash string
|
||||||
|
|
||||||
|
// refs is the map of all the objects that references this client, used for reference counting to gc
|
||||||
|
// the client if unneeded.
|
||||||
|
refs map[runnerOwnerRef]struct{}
|
||||||
|
|
||||||
|
*github.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
type resourceReader interface {
|
||||||
|
Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error
|
||||||
|
}
|
||||||
|
|
||||||
|
type MultiGitHubClient struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
|
||||||
|
client resourceReader
|
||||||
|
|
||||||
|
githubClient *github.Client
|
||||||
|
|
||||||
|
// The saved client is freed once all its dependents disappear, or the contents of the secret changed.
|
||||||
|
// We track dependents via a golang map embedded within the savedClient struct. Each dependent is checked on their respective Kubernetes finalizer,
|
||||||
|
// so that we won't miss any dependent's termination.
|
||||||
|
// The change is the secret is determined using the hash of its contents.
|
||||||
|
clients map[secretRef]savedClient
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMultiGitHubClient(client resourceReader, githubClient *github.Client) *MultiGitHubClient {
|
||||||
|
return &MultiGitHubClient{
|
||||||
|
client: client,
|
||||||
|
githubClient: githubClient,
|
||||||
|
clients: map[secretRef]savedClient{},
|
||||||
|
}
|
||||||
|
}

// InitForRunnerPod sets up and returns the *github.Client for the given runner pod.
// In case the pod does not request a custom client, it returns the default client.
func (c *MultiGitHubClient) InitForRunnerPod(ctx context.Context, pod *corev1.Pod) (*github.Client, error) {
	// The ref defaults (kind, namespace, and name taken from the pod itself) are used only when the user created the pod directly, not via Runner, RunnerReplicaSet, RunnerDeployment, or RunnerSet resources.
	ref := refFromRunnerPod(pod)
	secretName := pod.Annotations[annotationKeyGitHubAPICredsSecret]

	// kind can be any of Pod, Runner, RunnerReplicaSet, RunnerDeployment, or RunnerSet depending on which custom resource the user directly created.
	return c.initClientWithSecretName(ctx, pod.Namespace, secretName, ref)
}

// InitForRunner sets up and returns the *github.Client for the given Runner.
// In case the Runner does not request a custom client, it returns the default client.
func (c *MultiGitHubClient) InitForRunner(ctx context.Context, r *v1alpha1.Runner) (*github.Client, error) {
	var secretName string
	if r.Spec.GitHubAPICredentialsFrom != nil {
		secretName = r.Spec.GitHubAPICredentialsFrom.SecretRef.Name
	}

	// The ref defaults (kind, namespace, and name taken from the runner itself) are used only when the user created the Runner resource directly, not via RunnerReplicaSet, RunnerDeployment, or RunnerSet resources.
	ref := refFromRunner(r)
	if ref.ns != r.Namespace {
		return nil, fmt.Errorf("referencing github api creds secret from owner in another namespace is not supported yet")
	}

	// kind can be any of Runner, RunnerReplicaSet, or RunnerDeployment depending on which custom resource the user directly created.
	return c.initClientWithSecretName(ctx, r.Namespace, secretName, ref)
}

// InitForRunnerSet sets up and returns the *github.Client for the given RunnerSet.
// In case the RunnerSet does not request a custom client, it returns the default client.
func (c *MultiGitHubClient) InitForRunnerSet(ctx context.Context, rs *v1alpha1.RunnerSet) (*github.Client, error) {
	ref := refFromRunnerSet(rs)

	var secretName string
	if rs.Spec.GitHubAPICredentialsFrom != nil {
		secretName = rs.Spec.GitHubAPICredentialsFrom.SecretRef.Name
	}

	return c.initClientWithSecretName(ctx, rs.Namespace, secretName, ref)
}

// InitForHRA sets up and returns the *github.Client for the given HorizontalRunnerAutoscaler.
// In case the HorizontalRunnerAutoscaler does not request a custom client, it returns the default client.
func (c *MultiGitHubClient) InitForHRA(ctx context.Context, hra *v1alpha1.HorizontalRunnerAutoscaler) (*github.Client, error) {
	ref := refFromHorizontalRunnerAutoscaler(hra)

	var secretName string
	if hra.Spec.GitHubAPICredentialsFrom != nil {
		secretName = hra.Spec.GitHubAPICredentialsFrom.SecretRef.Name
	}

	return c.initClientWithSecretName(ctx, hra.Namespace, secretName, ref)
}

func (c *MultiGitHubClient) DeinitForRunnerPod(p *corev1.Pod) {
	secretName := p.Annotations[annotationKeyGitHubAPICredsSecret]
	c.derefClient(p.Namespace, secretName, refFromRunnerPod(p))
}

func (c *MultiGitHubClient) DeinitForRunner(r *v1alpha1.Runner) {
	var secretName string
	if r.Spec.GitHubAPICredentialsFrom != nil {
		secretName = r.Spec.GitHubAPICredentialsFrom.SecretRef.Name
	}

	c.derefClient(r.Namespace, secretName, refFromRunner(r))
}

func (c *MultiGitHubClient) DeinitForRunnerSet(rs *v1alpha1.RunnerSet) {
	var secretName string
	if rs.Spec.GitHubAPICredentialsFrom != nil {
		secretName = rs.Spec.GitHubAPICredentialsFrom.SecretRef.Name
	}

	c.derefClient(rs.Namespace, secretName, refFromRunnerSet(rs))
}

func (c *MultiGitHubClient) DeinitForHRA(hra *v1alpha1.HorizontalRunnerAutoscaler) {
	var secretName string
	if hra.Spec.GitHubAPICredentialsFrom != nil {
		secretName = hra.Spec.GitHubAPICredentialsFrom.SecretRef.Name
	}

	c.derefClient(hra.Namespace, secretName, refFromHorizontalRunnerAutoscaler(hra))
}

// initClientForSecret hashes the secret's contents, creates (or reuses) the GitHub
// client built from those credentials, and registers the dependent for reference counting.
func (c *MultiGitHubClient) initClientForSecret(secret *corev1.Secret, dependent *runnerOwnerRef) (*savedClient, error) {
	secRef := secretRef{
		ns:   secret.Namespace,
		name: secret.Name,
	}

	cliRef := c.clients[secRef]

	var ks []string

	for k := range secret.Data {
		ks = append(ks, k)
	}

	sort.SliceStable(ks, func(i, j int) bool { return ks[i] < ks[j] })

	hash := sha1.New()
	for _, k := range ks {
		hash.Write(secret.Data[k])
	}
	hashStr := hex.EncodeToString(hash.Sum(nil))

	if cliRef.hash != hashStr {
		delete(c.clients, secRef)

		conf, err := secretDataToGitHubClientConfig(secret.Data)
		if err != nil {
			return nil, err
		}

		// Fall back to the controller-wide setting if EnterpriseURL is not set and the original client is an enterprise client.
		if conf.EnterpriseURL == "" && c.githubClient.IsEnterprise {
			conf.EnterpriseURL = c.githubClient.GithubBaseURL
		}

		cli, err := conf.NewClient()
		if err != nil {
			return nil, err
		}

		cliRef = savedClient{
			hash:   hashStr,
			refs:   map[runnerOwnerRef]struct{}{},
			Client: cli,
		}

		c.clients[secRef] = cliRef
	}

	if dependent != nil {
		c.clients[secRef].refs[*dependent] = struct{}{}
	}

	return &cliRef, nil
}

// initClientWithSecretName resolves the named secret and returns the client cached for it,
// falling back to the default controller-wide client when no secret name is given.
func (c *MultiGitHubClient) initClientWithSecretName(ctx context.Context, ns, secretName string, runRef *runnerOwnerRef) (*github.Client, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if secretName == "" {
		return c.githubClient, nil
	}

	secRef := secretRef{
		ns:   ns,
		name: secretName,
	}

	if _, ok := c.clients[secRef]; !ok {
		c.clients[secRef] = savedClient{}
	}

	var sec corev1.Secret
	if err := c.client.Get(ctx, types.NamespacedName{Namespace: ns, Name: secretName}, &sec); err != nil {
		return nil, err
	}

	savedClient, err := c.initClientForSecret(&sec, runRef)
	if err != nil {
		return nil, err
	}

	return savedClient.Client, nil
}

// derefClient removes the dependent from the cached client's reference set and frees
// the cached client once no dependents remain (or unconditionally when dependent is nil).
func (c *MultiGitHubClient) derefClient(ns, secretName string, dependent *runnerOwnerRef) {
	c.mu.Lock()
	defer c.mu.Unlock()

	secRef := secretRef{
		ns:   ns,
		name: secretName,
	}

	if dependent != nil {
		delete(c.clients[secRef].refs, *dependent)
	}

	cliRef := c.clients[secRef]

	if dependent == nil || len(cliRef.refs) == 0 {
		delete(c.clients, secRef)
	}
}

func secretDataToGitHubClientConfig(data map[string][]byte) (*github.Config, error) {
	var (
		conf github.Config

		err error
	)

	conf.URL = string(data["github_url"])

	conf.UploadURL = string(data["github_upload_url"])

	conf.EnterpriseURL = string(data["github_enterprise_url"])

	conf.RunnerGitHubURL = string(data["github_runner_url"])

	conf.Token = string(data["github_token"])

	// The app ID and installation ID are only meaningful for GitHub App authentication;
	// parse them only when present so that PAT-only secrets do not fail on strconv.ParseInt("").
	if appID := string(data["github_app_id"]); appID != "" {
		conf.AppID, err = strconv.ParseInt(appID, 10, 64)
		if err != nil {
			return nil, err
		}
	}

	if instID := string(data["github_app_installation_id"]); instID != "" {
		conf.AppInstallationID, err = strconv.ParseInt(instID, 10, 64)
		if err != nil {
			return nil, err
		}
	}

	conf.AppPrivateKey = string(data["github_app_private_key"])

	return &conf, nil
}
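
// For reference, a minimal example of a Secret this function can consume, shown as an
// illustrative sketch (the key names are taken from the code above; the values are made up).
// Either a PAT (github_token) or a GitHub App triple (github_app_id,
// github_app_installation_id, github_app_private_key) would typically be set:
//
//	apiVersion: v1
//	kind: Secret
//	metadata:
//	  name: my-github-app-secret
//	stringData:
//	  github_app_id: "12345"
//	  github_app_installation_id: "678910"
//	  github_app_private_key: |
//	    -----BEGIN RSA PRIVATE KEY-----
//	    ...
//	    -----END RSA PRIVATE KEY-----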
func refFromRunner(r *v1alpha1.Runner) *runnerOwnerRef {
	return &runnerOwnerRef{
		kind: r.Kind,
		ns:   r.Namespace,
		name: r.Name,
	}
}

func refFromRunnerPod(po *corev1.Pod) *runnerOwnerRef {
	return &runnerOwnerRef{
		kind: po.Kind,
		ns:   po.Namespace,
		name: po.Name,
	}
}

func refFromRunnerSet(rs *v1alpha1.RunnerSet) *runnerOwnerRef {
	return &runnerOwnerRef{
		kind: rs.Kind,
		ns:   rs.Namespace,
		name: rs.Name,
	}
}

func refFromHorizontalRunnerAutoscaler(hra *v1alpha1.HorizontalRunnerAutoscaler) *runnerOwnerRef {
	return &runnerOwnerRef{
		kind: hra.Kind,
		ns:   hra.Namespace,
		name: hra.Name,
	}
}
@@ -10,7 +10,9 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 func newWorkGenericEphemeralVolume(t *testing.T, storageReq string) corev1.Volume {
@@ -125,6 +127,10 @@ func TestNewRunnerPod(t *testing.T) {
 					Name:  "RUNNER_EPHEMERAL",
 					Value: "true",
 				},
+				{
+					Name:  "RUNNER_STATUS_UPDATE_HOOK",
+					Value: "false",
+				},
 				{
 					Name:  "DOCKER_HOST",
 					Value: "tcp://localhost:2376",
@@ -255,6 +261,10 @@ func TestNewRunnerPod(t *testing.T) {
 					Name:  "RUNNER_EPHEMERAL",
 					Value: "true",
 				},
+				{
+					Name:  "RUNNER_STATUS_UPDATE_HOOK",
+					Value: "false",
+				},
 			},
 			VolumeMounts: []corev1.VolumeMount{
 				{
@@ -333,6 +343,10 @@ func TestNewRunnerPod(t *testing.T) {
 					Name:  "RUNNER_EPHEMERAL",
 					Value: "true",
 				},
+				{
+					Name:  "RUNNER_STATUS_UPDATE_HOOK",
+					Value: "false",
+				},
 			},
 			VolumeMounts: []corev1.VolumeMount{
 				{
@@ -515,7 +529,7 @@ func TestNewRunnerPod(t *testing.T) {
 	for i := range testcases {
 		tc := testcases[i]
 		t.Run(tc.description, func(t *testing.T) {
-			got, err := newRunnerPod(tc.template, tc.config, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL)
+			got, err := newRunnerPod(tc.template, tc.config, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL, false)
 			require.NoError(t, err)
 			require.Equal(t, tc.want, got)
 		})
@@ -624,6 +638,10 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
 					Name:  "RUNNER_EPHEMERAL",
 					Value: "true",
 				},
+				{
+					Name:  "RUNNER_STATUS_UPDATE_HOOK",
+					Value: "false",
+				},
 				{
 					Name:  "DOCKER_HOST",
 					Value: "tcp://localhost:2376",
@@ -769,6 +787,10 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
 					Name:  "RUNNER_EPHEMERAL",
 					Value: "true",
 				},
+				{
+					Name:  "RUNNER_STATUS_UPDATE_HOOK",
+					Value: "false",
+				},
 				{
 					Name:  "RUNNER_NAME",
 					Value: "runner",
@@ -866,6 +888,10 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
 					Name:  "RUNNER_EPHEMERAL",
 					Value: "true",
 				},
+				{
+					Name:  "RUNNER_STATUS_UPDATE_HOOK",
+					Value: "false",
+				},
 				{
 					Name:  "RUNNER_NAME",
 					Value: "runner",
@@ -1105,13 +1131,20 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
 
 	for i := range testcases {
 		tc := testcases[i]
+
+		rr := &testResourceReader{
+			objects: map[types.NamespacedName]client.Object{},
+		}
+
+		multiClient := NewMultiGitHubClient(rr, &github.Client{GithubBaseURL: githubBaseURL})
+
 		t.Run(tc.description, func(t *testing.T) {
 			r := &RunnerReconciler{
 				RunnerImage:            defaultRunnerImage,
 				RunnerImagePullSecrets: defaultRunnerImagePullSecrets,
 				DockerImage:            defaultDockerImage,
 				DockerRegistryMirror:   defaultDockerRegistryMirror,
-				GitHubClient:           &github.Client{GithubBaseURL: githubBaseURL},
+				GitHubClient:           multiClient,
 				Scheme:                 scheme,
 			}
 			got, err := r.newPod(tc.runner)
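
The tests above construct a testResourceReader to back the MultiGitHubClient. Its definition is not part of this excerpt; a minimal sketch satisfying the resourceReader interface might look like the following (an illustrative assumption, not the actual test helper):

	type testResourceReader struct {
		objects map[types.NamespacedName]client.Object
	}

	func (r *testResourceReader) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
		found, ok := r.objects[key]
		if !ok {
			return kerrors.NewNotFound(schema.GroupResource{}, key.Name)
		}
		// Copy the stored object into obj; this assumes obj and found share a concrete type.
		reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(found).Elem())
		return nil
	}

(Here kerrors is k8s.io/apimachinery/pkg/api/errors and schema is k8s.io/apimachinery/pkg/runtime/schema.)
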
@@ -6,7 +6,6 @@ import (
 	"net/http"
 	"time"
 
-	"github.com/actions-runner-controller/actions-runner-controller/github"
 	"github.com/go-logr/logr"
 	"gomodules.xyz/jsonpatch/v2"
 	admissionv1 "k8s.io/api/admission/v1"
@@ -29,7 +28,7 @@ type PodRunnerTokenInjector struct {
 	Name     string
 	Log      logr.Logger
 	Recorder record.EventRecorder
-	GitHubClient *github.Client
+	GitHubClient *MultiGitHubClient
 	decoder *admission.Decoder
 }
 
@@ -66,7 +65,12 @@ func (t *PodRunnerTokenInjector) Handle(ctx context.Context, req admission.Reque
 		return newEmptyResponse()
 	}
 
-	rt, err := t.GitHubClient.GetRegistrationToken(context.Background(), enterprise, org, repo, pod.Name)
+	ghc, err := t.GitHubClient.InitForRunnerPod(ctx, &pod)
+	if err != nil {
+		return admission.Errored(http.StatusInternalServerError, err)
+	}
+
+	rt, err := ghc.GetRegistrationToken(context.Background(), enterprise, org, repo, pod.Name)
 	if err != nil {
 		t.Log.Error(err, "Failed to get new registration token")
 		return admission.Errored(http.StatusInternalServerError, err)
@@ -20,6 +20,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"reflect"
 	"strconv"
 	"strings"
 	"time"
@@ -35,10 +36,10 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	corev1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
-	"github.com/actions-runner-controller/actions-runner-controller/github"
 )
 
 const (
@@ -51,6 +52,8 @@ const (
 
 	EnvVarOrg        = "RUNNER_ORG"
 	EnvVarRepo       = "RUNNER_REPO"
+	EnvVarGroup      = "RUNNER_GROUP"
+	EnvVarLabels     = "RUNNER_LABELS"
 	EnvVarEnterprise = "RUNNER_ENTERPRISE"
 	EnvVarEphemeral  = "RUNNER_EPHEMERAL"
 	EnvVarTrue       = "true"
@@ -62,7 +65,7 @@ type RunnerReconciler struct {
 	Log      logr.Logger
 	Recorder record.EventRecorder
 	Scheme   *runtime.Scheme
-	GitHubClient *github.Client
+	GitHubClient *MultiGitHubClient
 	RunnerImage            string
 	RunnerImagePullSecrets []string
 	DockerImage            string
@@ -70,8 +73,8 @@ type RunnerReconciler struct {
 	Name                        string
 	RegistrationRecheckInterval time.Duration
 	RegistrationRecheckJitter   time.Duration
+	UseRunnerStatusUpdateHook   bool
 	UnregistrationRetryDelay    time.Duration
 }
 
 // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners,verbs=get;list;watch;create;update;patch;delete
@@ -81,6 +84,9 @@ type RunnerReconciler struct {
 // +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;delete
 // +kubebuilder:rbac:groups=core,resources=pods/finalizers,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
+// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=create;delete;get
+// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=create;delete;get
+// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=create;delete;get
 
 func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 	log := r.Log.WithValues("runner", req.NamespacedName)
@@ -116,6 +122,8 @@ func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
 			return r.processRunnerDeletion(runner, ctx, log, nil)
 		}
 
+		r.GitHubClient.DeinitForRunner(&runner)
+
 		return r.processRunnerDeletion(runner, ctx, log, &pod)
 	}
 
@@ -135,7 +143,7 @@ func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
 
 	ready := runnerPodReady(&pod)
 
-	if runner.Status.Phase != phase || runner.Status.Ready != ready {
+	if (runner.Status.Phase != phase || runner.Status.Ready != ready) && !r.UseRunnerStatusUpdateHook || runner.Status.Phase == "" && r.UseRunnerStatusUpdateHook {
 		if pod.Status.Phase == corev1.PodRunning {
 			// Seeing this message, you can expect the runner to become `Running` soon.
 			log.V(1).Info(
@@ -256,6 +264,96 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
 		return ctrl.Result{}, err
 	}
 
+	needsServiceAccount := runner.Spec.ServiceAccountName == "" && (r.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes")
+	if needsServiceAccount {
+		serviceAccount := &corev1.ServiceAccount{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      runner.ObjectMeta.Name,
+				Namespace: runner.ObjectMeta.Namespace,
+			},
+		}
+		if res := r.createObject(ctx, serviceAccount, serviceAccount.ObjectMeta, &runner, log); res != nil {
+			return *res, nil
+		}
+
+		rules := []rbacv1.PolicyRule{}
+
+		if r.UseRunnerStatusUpdateHook {
+			rules = append(rules, []rbacv1.PolicyRule{
+				{
+					APIGroups:     []string{"actions.summerwind.dev"},
+					Resources:     []string{"runners/status"},
+					Verbs:         []string{"get", "update", "patch"},
+					ResourceNames: []string{runner.ObjectMeta.Name},
+				},
+			}...)
+		}
+
+		if runner.Spec.ContainerMode == "kubernetes" {
+			// Permissions based on https://github.com/actions/runner-container-hooks/blob/main/packages/k8s/README.md
+			rules = append(rules, []rbacv1.PolicyRule{
+				{
+					APIGroups: []string{""},
+					Resources: []string{"pods"},
+					Verbs:     []string{"get", "list", "create", "delete"},
+				},
+				{
+					APIGroups: []string{""},
+					Resources: []string{"pods/exec"},
+					Verbs:     []string{"get", "create"},
+				},
+				{
+					APIGroups: []string{""},
+					Resources: []string{"pods/log"},
+					Verbs:     []string{"get", "list", "watch"},
+				},
+				{
+					APIGroups: []string{"batch"},
+					Resources: []string{"jobs"},
+					Verbs:     []string{"get", "list", "create", "delete"},
+				},
+				{
+					APIGroups: []string{""},
+					Resources: []string{"secrets"},
+					Verbs:     []string{"get", "list", "create", "delete"},
+				},
+			}...)
+		}
+
+		role := &rbacv1.Role{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      runner.ObjectMeta.Name,
+				Namespace: runner.ObjectMeta.Namespace,
+			},
+			Rules: rules,
+		}
+		if res := r.createObject(ctx, role, role.ObjectMeta, &runner, log); res != nil {
+			return *res, nil
+		}
+
+		roleBinding := &rbacv1.RoleBinding{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      runner.ObjectMeta.Name,
+				Namespace: runner.ObjectMeta.Namespace,
+			},
+			RoleRef: rbacv1.RoleRef{
+				APIGroup: "rbac.authorization.k8s.io",
+				Kind:     "Role",
+				Name:     runner.ObjectMeta.Name,
+			},
+			Subjects: []rbacv1.Subject{
+				{
+					Kind:      "ServiceAccount",
+					Name:      runner.ObjectMeta.Name,
+					Namespace: runner.ObjectMeta.Namespace,
+				},
+			},
+		}
+		if res := r.createObject(ctx, roleBinding, roleBinding.ObjectMeta, &runner, log); res != nil {
+			return *res, nil
+		}
+	}
+
 	if err := r.Create(ctx, &newPod); err != nil {
 		if kerrors.IsAlreadyExists(err) {
 			// Gracefully handle pod-already-exists errors due to informer cache delay.
@@ -278,6 +376,27 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
 	return ctrl.Result{}, nil
 }
 
+func (r *RunnerReconciler) createObject(ctx context.Context, obj client.Object, meta metav1.ObjectMeta, runner *v1alpha1.Runner, log logr.Logger) *ctrl.Result {
+	kind := strings.Split(reflect.TypeOf(obj).String(), ".")[1]
+	if err := ctrl.SetControllerReference(runner, obj, r.Scheme); err != nil {
+		log.Error(err, fmt.Sprintf("Could not add owner reference to %s %s. %s", kind, meta.Name, err.Error()))
+		return &ctrl.Result{Requeue: true}
+	}
+	if err := r.Create(ctx, obj); err != nil {
+		if kerrors.IsAlreadyExists(err) {
+			log.Info(fmt.Sprintf("Failed to create %s %s as it already exists. Reusing existing %s", kind, meta.Name, kind))
+			r.Recorder.Event(runner, corev1.EventTypeNormal, fmt.Sprintf("%sReused", kind), fmt.Sprintf("Reused %s '%s'", kind, meta.Name))
+			return nil
+		}
+
+		log.Error(err, fmt.Sprintf("Retrying as failed to create %s %s resource", kind, meta.Name))
+		return &ctrl.Result{Requeue: true}
+	}
+	r.Recorder.Event(runner, corev1.EventTypeNormal, fmt.Sprintf("%sCreated", kind), fmt.Sprintf("Created %s '%s'", kind, meta.Name))
+	log.Info(fmt.Sprintf("Created %s", kind), "name", meta.Name)
+	return nil
+}
+
 func (r *RunnerReconciler) updateRegistrationToken(ctx context.Context, runner v1alpha1.Runner) (bool, error) {
 	if runner.IsRegisterable() {
 		return false, nil
@@ -285,7 +404,12 @@ func (r *RunnerReconciler) updateRegistrationToken(ctx context.Context, runner v
 
 	log := r.Log.WithValues("runner", runner.Name)
 
-	rt, err := r.GitHubClient.GetRegistrationToken(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
+	ghc, err := r.GitHubClient.InitForRunner(ctx, &runner)
+	if err != nil {
+		return false, err
+	}
+
+	rt, err := ghc.GetRegistrationToken(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
 	if err != nil {
 		// An error can be a permanent, permission issue like the below:
 		// POST https://api.github.com/enterprises/YOUR_ENTERPRISE/actions/runners/registration-token: 403 Resource not accessible by integration []
@@ -325,6 +449,11 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 		labels[k] = v
 	}
 
+	ghc, err := r.GitHubClient.InitForRunner(context.Background(), &runner)
+	if err != nil {
+		return corev1.Pod{}, err
+	}
+
 	// This implies that...
 	//
 	// (1) We recreate the runner pod whenever the runner has changes in:
@@ -348,7 +477,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 		filterLabels(runner.ObjectMeta.Labels, LabelKeyRunnerTemplateHash),
 		runner.ObjectMeta.Annotations,
 		runner.Spec,
-		r.GitHubClient.GithubBaseURL,
+		ghc.GithubBaseURL,
 		// Token change should trigger replacement.
 		// We need to include this explicitly here because
 		// runner.Spec does not contain the possibly updated token stored in the
@@ -426,7 +555,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 		}
 	}
 
-	pod, err := newRunnerPodWithContainerMode(runner.Spec.ContainerMode, template, runner.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, r.GitHubClient.GithubBaseURL)
+	pod, err := newRunnerPodWithContainerMode(runner.Spec.ContainerMode, template, runner.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, ghc.GithubBaseURL, r.UseRunnerStatusUpdateHook)
 	if err != nil {
 		return pod, err
 	}
@@ -474,9 +603,13 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 	if runnerSpec.NodeSelector != nil {
 		pod.Spec.NodeSelector = runnerSpec.NodeSelector
 	}
 
 	if runnerSpec.ServiceAccountName != "" {
 		pod.Spec.ServiceAccountName = runnerSpec.ServiceAccountName
+	} else if r.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes" {
+		pod.Spec.ServiceAccountName = runner.ObjectMeta.Name
 	}
 
 	if runnerSpec.AutomountServiceAccountToken != nil {
 		pod.Spec.AutomountServiceAccountToken = runnerSpec.AutomountServiceAccountToken
 	}
@@ -517,6 +650,10 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 		pod.Spec.HostAliases = runnerSpec.HostAliases
 	}
 
+	if runnerSpec.DnsPolicy != "" {
+		pod.Spec.DNSPolicy = runnerSpec.DnsPolicy
+	}
+
 	if runnerSpec.DnsConfig != nil {
 		pod.Spec.DNSConfig = runnerSpec.DnsConfig
 	}
@@ -582,14 +719,14 @@ func runnerHookEnvs(pod *corev1.Pod) ([]corev1.EnvVar, error) {
 				},
 			},
 		},
-		corev1.EnvVar{
+		{
 			Name:  "ACTIONS_RUNNER_REQUIRE_SAME_NODE",
 			Value: strconv.FormatBool(isRequireSameNode),
 		},
 	}, nil
 }
 
-func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string) (corev1.Pod, error) {
+func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, useRunnerStatusUpdateHook bool) (corev1.Pod, error) {
 	var (
 		privileged      bool = true
 		dockerdInRunner bool = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
@@ -609,6 +746,9 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
 	// This label selector is used by default when rd.Spec.Selector is empty.
 	template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunner, "")
 	template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyPodMutation, LabelValuePodMutation)
+	if runnerSpec.GitHubAPICredentialsFrom != nil {
+		template.ObjectMeta.Annotations = CloneAndAddLabel(template.ObjectMeta.Annotations, annotationKeyGitHubAPICredsSecret, runnerSpec.GitHubAPICredentialsFrom.SecretRef.Name)
+	}
 
 	workDir := runnerSpec.WorkDir
 	if workDir == "" {
@@ -638,11 +778,11 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
 			Value: runnerSpec.Enterprise,
 		},
 		{
-			Name:  "RUNNER_LABELS",
+			Name:  EnvVarLabels,
 			Value: strings.Join(runnerSpec.Labels, ","),
 		},
 		{
-			Name:  "RUNNER_GROUP",
+			Name:  EnvVarGroup,
 			Value: runnerSpec.Group,
 		},
 		{
@@ -665,6 +805,10 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
 			Name:  EnvVarEphemeral,
 			Value: fmt.Sprintf("%v", ephemeral),
 		},
+		{
+			Name:  "RUNNER_STATUS_UPDATE_HOOK",
+			Value: fmt.Sprintf("%v", useRunnerStatusUpdateHook),
+		},
 	}
 
 	var seLinuxOptions *corev1.SELinuxOptions
@@ -694,7 +838,7 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
 	if dockerdContainer != nil {
 		template.Spec.Containers = append(template.Spec.Containers[:dockerdContainerIndex], template.Spec.Containers[dockerdContainerIndex+1:]...)
 	}
-	if runnerContainerIndex < runnerContainerIndex {
+	if dockerdContainerIndex < runnerContainerIndex {
 		runnerContainerIndex--
 	}
 	dockerdContainer = nil
@@ -928,6 +1072,74 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
 			},
 		}...)
 
+		// This lets dockerd create each container's network interface with the specified MTU.
+		// In other words, this sets com.docker.network.driver.mtu in the docker bridge options.
+		// You can see the options by running `docker network inspect bridge`, where you will see something like the below when spec.dockerMTU=1400:
+		//
+		//   "Options": {
+		//       "com.docker.network.bridge.default_bridge": "true",
+		//       "com.docker.network.bridge.enable_icc": "true",
+		//       "com.docker.network.bridge.enable_ip_masquerade": "true",
+		//       "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+		//       "com.docker.network.bridge.name": "docker0",
+		//       "com.docker.network.driver.mtu": "1400"
+		//   },
+		//
+		// See e.g. https://forums.docker.com/t/changing-mtu-value/74114 and https://mlohr.com/docker-mtu/ for more details.
+		//
+		// Note though, this doesn't immediately affect docker0's MTU, nor the MTU of the docker network created with docker-create-network.
+		// You can verify that by running `ip link` within the containers:
+		//
+		//   # ip link
+		//   1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
+		//      link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+		//   2: eth0@if1118: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
+		//      link/ether c2:dd:e6:66:8e:8b brd ff:ff:ff:ff:ff:ff
+		//   3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
+		//      link/ether 02:42:ab:1c:83:69 brd ff:ff:ff:ff:ff:ff
+		//   4: br-c5bf6c172bd7: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
+		//      link/ether 02:42:e2:91:13:1e brd ff:ff:ff:ff:ff:ff
+		//
+		// br-c5bf6c172bd7 is the interface that corresponds to the docker network created with docker-create-network.
+		// We have another ARC feature to inherit the host's MTU to the docker networks:
+		// https://github.com/actions-runner-controller/actions-runner-controller/pull/1201
+		//
+		// docker0's MTU is updated to the specified MTU once any container is created.
+		// You can verify that by running a random container from within the runner or dockerd containers:
+		//
+		//   / # docker run -d busybox sh -c 'sleep 10'
+		//   e848e6acd6404ca0199e4d9c5ef485d88c974ddfb7aaf2359c66811f68cf5e42
+		//
+		// You'll now see that the veth767f1a5@if7 interface got created with the MTU inherited from dockerd:
+		//
+		//   / # ip link
+		//   1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
+		//      link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+		//   2: eth0@if1118: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
+		//      link/ether c2:dd:e6:66:8e:8b brd ff:ff:ff:ff:ff:ff
+		//   3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1400 qdisc noqueue state UP
+		//      link/ether 02:42:ab:1c:83:69 brd ff:ff:ff:ff:ff:ff
+		//   4: br-c5bf6c172bd7: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
+		//      link/ether 02:42:e2:91:13:1e brd ff:ff:ff:ff:ff:ff
+		//   8: veth767f1a5@if7: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1400 qdisc noqueue master docker0 state UP
+		//      link/ether 82:d5:08:28:d8:98 brd ff:ff:ff:ff:ff:ff
+		//
+		//   # After the 10 second sleep, the container stops and the veth767f1a5@if7 interface gets deleted:
+		//
+		//   / # ip link
+		//   1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
+		//      link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+		//   2: eth0@if1118: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
+		//      link/ether c2:dd:e6:66:8e:8b brd ff:ff:ff:ff:ff:ff
+		//   3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
+		//      link/ether 02:42:ab:1c:83:69 brd ff:ff:ff:ff:ff:ff
+		//   4: br-c5bf6c172bd7: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
+		//      link/ether 02:42:e2:91:13:1e brd ff:ff:ff:ff:ff:ff
+		//
+		// See https://github.com/moby/moby/issues/26382#issuecomment-246906331 for reference.
+		//
+		// Probably we'd better infer DockerMTU from the host's primary interface's MTU and docker0's MTU?
+		// That's another story; if you want it, please start a thread in GitHub Discussions!
 		dockerdContainer.Args = append(dockerdContainer.Args,
 			"--mtu",
 			fmt.Sprintf("%d", *runnerSpec.DockerMTU),
@@ -962,8 +1174,8 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
 	return *pod, nil
 }
 
-func newRunnerPod(template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string) (corev1.Pod, error) {
+func newRunnerPod(template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, useRunnerStatusUpdateHookEphemeralRole bool) (corev1.Pod, error) {
 	return newRunnerPodWithContainerMode("", template, runnerSpec, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL, useRunnerStatusUpdateHookEphemeralRole)
 }
 
 func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
@@ -1086,13 +1298,3 @@ func isRequireSameNode(pod *corev1.Pod) (bool, error) {
 	}
 	return false, nil
 }
-
-func overwriteRunnerEnv(runner *v1alpha1.Runner, key string, value string) {
-	for i := range runner.Spec.Env {
-		if runner.Spec.Env[i].Name == key {
-			runner.Spec.Env[i].Value = value
-			return
-		}
-	}
-	runner.Spec.Env = append(runner.Spec.Env, corev1.EnvVar{Name: key, Value: value})
-}
@@ -9,7 +9,7 @@ import (
 
 	"github.com/actions-runner-controller/actions-runner-controller/github"
 	"github.com/go-logr/logr"
-	gogithub "github.com/google/go-github/v45/github"
+	gogithub "github.com/google/go-github/v47/github"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -98,11 +98,27 @@ func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, l
 		// If it's already unregistered in the previous reconciliation loop,
 		// you can safely assume that it won't get registered again, so it's safe to delete the runner pod.
 		log.Info("Runner pod is marked as already unregistered.")
-	} else if runnerID == nil {
+	} else if runnerID == nil && !runnerPodOrContainerIsStopped(pod) && !podConditionTransitionTimeAfter(pod, corev1.PodReady, registrationTimeout) {
 		log.Info(
-			"Unregistration started before runner ID is assigned. "+
-				"Perhaps the runner pod was terminated by anyone other than ARC? Was it OOM killed? "+
+			"Unregistration started before the runner obtains an ID. Waiting for the registration timeout to elapse, or the runner to obtain an ID, or the runner pod to stop",
+			"registrationTimeout", registrationTimeout,
+		)
+		return &ctrl.Result{RequeueAfter: retryDelay}, nil
+	} else if runnerID == nil && runnerPodOrContainerIsStopped(pod) {
+		log.Info(
+			"Unregistration started before runner ID is assigned and the runner stopped before obtaining an ID within the registration timeout. "+
+				"Perhaps the runner successfully ran the job and stopped normally before the runner ID became visible via the GitHub API? "+
+				"Perhaps the runner pod was terminated by anyone other than ARC? Was it OOM killed? "+
 				"Marking unregistration as completed anyway because there's nothing ARC can do.",
+			"registrationTimeout", registrationTimeout,
+		)
+	} else if runnerID == nil && podConditionTransitionTimeAfter(pod, corev1.PodReady, registrationTimeout) {
+		log.Info(
+			"Unregistration started before runner ID is assigned and the runner was unable to obtain an ID within the registration timeout. "+
+				"Perhaps the runner has communication issues, or a firewall egress rule is dropping traffic to the GitHub API, or the GitHub API is unavailable? "+
+				"Marking unregistration as completed anyway because there's nothing ARC can do. "+
+				"This may result in cancelling the job depending on your terminationGracePeriodSeconds and RUNNER_GRACEFUL_STOP_TIMEOUT settings.",
+			"registrationTimeout", registrationTimeout,
 		)
 	} else if pod != nil && runnerPodOrContainerIsStopped(pod) {
 		// If it's an ephemeral runner with the actions/runner container exited with 0,
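
The new unregistration flow above keys off podConditionTransitionTimeAfter, whose definition is not part of this excerpt. A plausible sketch of such a helper, assuming it checks how long ago the given pod condition last transitioned (an illustration, not the actual implementation):

	func podConditionTransitionTimeAfter(pod *corev1.Pod, tpe corev1.PodConditionType, d time.Duration) bool {
		if pod == nil {
			return false
		}
		for _, c := range pod.Status.Conditions {
			if c.Type == tpe {
				// True when the condition transitioned more than d ago.
				return c.LastTransitionTime.Add(d).Before(time.Now())
			}
		}
		return false
	}
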
@@ -351,21 +367,22 @@ func setRunnerEnv(pod *corev1.Pod, key, value string) {
 // Case 1. (true, nil) when it has successfully unregistered the runner.
 // Case 2. (false, nil) when (2-1.) the runner has already been unregistered OR (2-2.) the runner will never be created OR (2-3.) the runner is not created yet and it is about to be registered (hence we couldn't see its existence via the GitHub Actions API yet)
 // Case 3. (false, err) when it postponed unregistration due to the runner being busy, or it tried to unregister the runner but failed due to
-// an error returned by GitHub API.
+//
+// an error returned by the GitHub API.
 //
 // When the returned value is "Case 2. (false, nil)", the caller must handle the three possible sub-cases appropriately.
 // In other words, all those three sub-cases cannot be distinguished by this function alone.
 //
 // - Case "2-1." can happen when e.g. ARC has successfully unregistered in a previous reconciliation loop or it was an ephemeral runner that finished its job run (an ephemeral runner is designed to stop after a job run).
 // You'd need to maintain the runner state (i.e. whether it's already unregistered or not) somewhere,
 // so that you can either not call this function at all if the runner state says it's already unregistered, or determine that it's case "2-1." when you got (false, nil).
 //
 // - Case "2-2." can happen when e.g. the runner registration token was somehow broken so that `config.sh` within the runner container was never meant to succeed.
 // Waiting and retrying forever on this case is not a solution, because `config.sh` won't succeed with a wrong token, hence the runner gets stuck in this state forever.
 // There isn't a perfect solution to this, but a practical workaround would be to implement a "grace period" on the caller side.
 //
 // - Case "2-3." can happen when e.g. ARC recreated an ephemeral runner pod in a previous reconciliation loop and then it was requested to delete the runner before the runner came up.
 // If handled inappropriately, this can cause a race condition between a deletion of the runner pod and GitHub scheduling a workflow job onto the runner.
 //
 // Once you have successfully detected case "2-1." or "2-2.", you can safely delete the runner pod because you know that the runner won't come back
 // as long as you recreate the runner pod.
@@ -27,13 +27,14 @@ import (
 
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/tools/record"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	corev1 "k8s.io/api/core/v1"
-	"github.com/actions-runner-controller/actions-runner-controller/github"
+	arcv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
+	corev1 "k8s.io/api/core/v1"
 )
 
 // RunnerPodReconciler reconciles a Runner object
@@ -42,7 +43,7 @@ type RunnerPodReconciler struct {
 	Log      logr.Logger
 	Recorder record.EventRecorder
 	Scheme   *runtime.Scheme
-	GitHubClient *github.Client
+	GitHubClient *MultiGitHubClient
 	Name                        string
 	RegistrationRecheckInterval time.Duration
 	RegistrationRecheckJitter   time.Duration
@@ -97,6 +98,11 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 		}
 	}
 
+	ghc, err := r.GitHubClient.InitForRunnerPod(ctx, &runnerPod)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
 	if runnerPod.ObjectMeta.DeletionTimestamp.IsZero() {
 		finalizers, added := addFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
 
@@ -121,6 +127,21 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 	} else {
 		log.V(2).Info("Seen deletion-timestamp is already set")
 
+		// Mark the parent Runner resource for deletion before deleting this runner pod from the cluster.
+		// Otherwise the runner controller can recreate the runner pod, thinking it has not created any runner pod yet.
+		var (
+			key    = types.NamespacedName{Namespace: runnerPod.Namespace, Name: runnerPod.Name}
+			runner arcv1alpha1.Runner
+		)
+		if err := r.Get(ctx, key, &runner); err == nil {
+			if runner.Name != "" && runner.DeletionTimestamp == nil {
+				log.Info("This runner pod seems to have been deleted directly, bypassing the parent Runner resource. Marking the runner for deletion to not let it recreate this pod.")
+				if err := r.Delete(ctx, &runner); err != nil {
+					return ctrl.Result{}, err
+				}
+			}
+		}
+
 		if finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerLinkedResourcesFinalizerName); removed {
 			if err := r.cleanupRunnerLinkedPods(ctx, &runnerPod, log); err != nil {
 				log.Info("Runner-linked pod cleanup failed due to an error. If this persists, please manually remove the runner-linked pods to unblock ARC", "err", err.Error())
@@ -148,7 +169,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 			// In a standard scenario, the upstream controller, like runnerset-controller, ensures this runner to be gracefully stopped before the deletion timestamp is set.
 			// But for the case that the user manually deleted it for whatever reason,
 			// we have to ensure it to gracefully stop now.
-			updatedPod, res, err := tickRunnerGracefulStop(ctx, r.unregistrationRetryDelay(), log, r.GitHubClient, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
+			updatedPod, res, err := tickRunnerGracefulStop(ctx, r.unregistrationRetryDelay(), log, ghc, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
 			if res != nil {
 				return *res, err
 			}
@@ -164,6 +185,8 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 
 			log.V(2).Info("Removed finalizer")
 
+			r.GitHubClient.DeinitForRunnerPod(updatedPod)
+
 			return ctrl.Result{}, nil
 		}
 
@@ -202,7 +225,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 		return ctrl.Result{}, nil
 	}
 
-	po, res, err := ensureRunnerPodRegistered(ctx, log, r.GitHubClient, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
+	po, res, err := ensureRunnerPodRegistered(ctx, log, ghc, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
 	if res != nil {
 		return *res, err
 	}
@@ -216,7 +239,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 	//
 	// In a standard scenario, ARC starts the unregistration process before marking the pod for deletion at all,
 	// so that it isn't subject to terminationGracePeriod and can safely take hours to finish its work.
-	_, res, err := tickRunnerGracefulStop(ctx, r.unregistrationRetryDelay(), log, r.GitHubClient, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
+	_, res, err := tickRunnerGracefulStop(ctx, r.unregistrationRetryDelay(), log, ghc, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
 	if res != nil {
 		return *res, err
 	}
@@ -315,7 +315,7 @@ func syncRunnerPodsOwners(ctx context.Context, c client.Client, log logr.Logger,
|
|||||||
numOwners := len(owners)
|
numOwners := len(owners)
|
||||||
|
|
||||||
var hashes []string
|
var hashes []string
|
||||||
for h, _ := range state.podsForOwners {
|
for h := range state.podsForOwners {
|
||||||
hashes = append(hashes, h)
|
hashes = append(hashes, h)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -165,6 +165,8 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
|
|||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
log.V(1).Info("Updated runnerreplicaset due to selector change")
|
||||||
|
|
||||||
// At this point, we are already sure that there's no need to create a new replicaset
|
// At this point, we are already sure that there's no need to create a new replicaset
|
||||||
// as the runner template hash is not changed.
|
// as the runner template hash is not changed.
|
||||||
//
|
//
|
||||||
@@ -182,7 +184,14 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
|
|||||||
//
|
//
|
||||||
// If we missed taking the EffectiveTime diff into account, you might end up experiencing scale-ups being delayed scale-down.
|
// If we missed taking the EffectiveTime diff into account, you might end up experiencing scale-ups being delayed scale-down.
|
||||||
// See https://github.com/actions-runner-controller/actions-runner-controller/pull/1477#issuecomment-1164154496
|
// See https://github.com/actions-runner-controller/actions-runner-controller/pull/1477#issuecomment-1164154496
|
||||||
if currentDesiredReplicas != newDesiredReplicas || newestSet.Spec.EffectiveTime != rd.Spec.EffectiveTime {
|
var et1, et2 time.Time
|
||||||
|
if newestSet.Spec.EffectiveTime != nil {
|
||||||
|
et1 = newestSet.Spec.EffectiveTime.Time
|
||||||
|
}
|
||||||
|
if rd.Spec.EffectiveTime != nil {
|
||||||
|
et2 = rd.Spec.EffectiveTime.Time
|
||||||
|
}
|
||||||
|
if currentDesiredReplicas != newDesiredReplicas || et1 != et2 {
|
||||||
newestSet.Spec.Replicas = &newDesiredReplicas
|
newestSet.Spec.Replicas = &newDesiredReplicas
|
||||||
newestSet.Spec.EffectiveTime = rd.Spec.EffectiveTime
|
newestSet.Spec.EffectiveTime = rd.Spec.EffectiveTime
|
||||||
|
|
||||||
@@ -192,6 +201,13 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
|
|||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
log.V(1).Info("Updated runnerreplicaset due to spec change",
|
||||||
|
"currentDesiredReplicas", currentDesiredReplicas,
|
||||||
|
"newDesiredReplicas", newDesiredReplicas,
|
||||||
|
"currentEffectiveTime", newestSet.Spec.EffectiveTime,
|
||||||
|
"newEffectiveTime", rd.Spec.EffectiveTime,
|
||||||
|
)
|
||||||
|
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -32,17 +32,15 @@ import (
|
|||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// RunnerReplicaSetReconciler reconciles a Runner object
|
// RunnerReplicaSetReconciler reconciles a Runner object
|
||||||
type RunnerReplicaSetReconciler struct {
|
type RunnerReplicaSetReconciler struct {
|
||||||
client.Client
|
client.Client
|
||||||
Log logr.Logger
|
Log logr.Logger
|
||||||
Recorder record.EventRecorder
|
Recorder record.EventRecorder
|
||||||
Scheme *runtime.Scheme
|
Scheme *runtime.Scheme
|
||||||
GitHubClient *github.Client
|
Name string
|
||||||
Name string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|||||||
@@ -52,15 +52,13 @@ func SetupTest(ctx2 context.Context) *corev1.Namespace {
|
|||||||
|
|
||||||
runnersList = fake.NewRunnersList()
|
runnersList = fake.NewRunnersList()
|
||||||
server = runnersList.GetServer()
|
server = runnersList.GetServer()
|
||||||
ghClient := newGithubClient(server)
|
|
||||||
|
|
||||||
controller := &RunnerReplicaSetReconciler{
|
controller := &RunnerReplicaSetReconciler{
|
||||||
Client: mgr.GetClient(),
|
Client: mgr.GetClient(),
|
||||||
Scheme: scheme.Scheme,
|
Scheme: scheme.Scheme,
|
||||||
Log: logf.Log,
|
Log: logf.Log,
|
||||||
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
||||||
GitHubClient: ghClient,
|
Name: "runnerreplicaset-" + ns.Name,
|
||||||
Name: "runnerreplicaset-" + ns.Name,
|
|
||||||
}
|
}
|
||||||
err = controller.SetupWithManager(mgr)
|
err = controller.SetupWithManager(mgr)
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||||
|
|||||||
@@ -45,12 +45,13 @@ type RunnerSetReconciler struct {
|
|||||||
Recorder record.EventRecorder
|
Recorder record.EventRecorder
|
||||||
Scheme *runtime.Scheme
|
Scheme *runtime.Scheme
|
||||||
|
|
||||||
CommonRunnerLabels []string
|
CommonRunnerLabels []string
|
||||||
GitHubBaseURL string
|
GitHubClient *MultiGitHubClient
|
||||||
RunnerImage string
|
RunnerImage string
|
||||||
RunnerImagePullSecrets []string
|
RunnerImagePullSecrets []string
|
||||||
DockerImage string
|
DockerImage string
|
||||||
DockerRegistryMirror string
|
DockerRegistryMirror string
|
||||||
|
UseRunnerStatusUpdateHook bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets,verbs=get;list;watch;create;update;patch;delete
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets,verbs=get;list;watch;create;update;patch;delete
|
||||||
@@ -80,6 +81,8 @@ func (r *RunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
|||||||
}
|
}
|
||||||
|
|
||||||
if !runnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
|
if !runnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
|
r.GitHubClient.DeinitForRunnerSet(runnerSet)
|
||||||
|
|
||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -97,7 +100,7 @@ func (r *RunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
|||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
desiredStatefulSet, err := r.newStatefulSet(runnerSet)
|
desiredStatefulSet, err := r.newStatefulSet(ctx, runnerSet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
r.Recorder.Event(runnerSet, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
|
r.Recorder.Event(runnerSet, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
|
||||||
|
|
||||||
@@ -185,7 +188,7 @@ func getRunnerSetSelector(runnerSet *v1alpha1.RunnerSet) *metav1.LabelSelector {
|
|||||||
var LabelKeyPodMutation = "actions-runner-controller/inject-registration-token"
|
var LabelKeyPodMutation = "actions-runner-controller/inject-registration-token"
|
||||||
var LabelValuePodMutation = "true"
|
var LabelValuePodMutation = "true"
|
||||||
|
|
||||||
func (r *RunnerSetReconciler) newStatefulSet(runnerSet *v1alpha1.RunnerSet) (*appsv1.StatefulSet, error) {
|
func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1alpha1.RunnerSet) (*appsv1.StatefulSet, error) {
|
||||||
runnerSetWithOverrides := *runnerSet.Spec.DeepCopy()
|
runnerSetWithOverrides := *runnerSet.Spec.DeepCopy()
|
||||||
|
|
||||||
runnerSetWithOverrides.Labels = append(runnerSetWithOverrides.Labels, r.CommonRunnerLabels...)
|
runnerSetWithOverrides.Labels = append(runnerSetWithOverrides.Labels, r.CommonRunnerLabels...)
|
||||||
@@ -221,7 +224,14 @@ func (r *RunnerSetReconciler) newStatefulSet(runnerSet *v1alpha1.RunnerSet) (*ap
|
|||||||
|
|
||||||
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunnerSetName, runnerSet.Name)
|
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunnerSetName, runnerSet.Name)
|
||||||
|
|
||||||
pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, r.GitHubBaseURL)
|
ghc, err := r.GitHubClient.InitForRunnerSet(ctx, runnerSet)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
githubBaseURL := ghc.GithubBaseURL
|
||||||
|
|
||||||
|
pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, githubBaseURL, r.UseRunnerStatusUpdateHook)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -75,6 +75,10 @@ func syncPVC(ctx context.Context, c client.Client, log logr.Logger, ns string, p
|
|||||||
|
|
||||||
log.V(2).Info("Reconciling runner PVC")
|
log.V(2).Info("Reconciling runner PVC")
|
||||||
|
|
||||||
|
// TODO: Probably we'd better remove PVCs related to the RunnetSet that is nowhere now?
|
||||||
|
// Otherwise, a bunch of continuously recreated StatefulSet
|
||||||
|
// can leave dangling PVCs forever, which might stress the cluster.
|
||||||
|
|
||||||
var sts appsv1.StatefulSet
|
var sts appsv1.StatefulSet
|
||||||
if err := c.Get(ctx, types.NamespacedName{Namespace: ns, Name: stsName}, &sts); err != nil {
|
if err := c.Get(ctx, types.NamespacedName{Namespace: ns, Name: stsName}, &sts); err != nil {
|
||||||
if !kerrors.IsNotFound(err) {
|
if !kerrors.IsNotFound(err) {
|
||||||
|
|||||||
controllers/testresourcereader.go (new file, 32 lines)
@@ -0,0 +1,32 @@
package controllers

import (
	"context"
	"errors"
	"reflect"

	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

type testResourceReader struct {
	objects map[types.NamespacedName]client.Object
}

func (r *testResourceReader) Get(_ context.Context, key client.ObjectKey, obj client.Object, _ ...client.GetOption) error {
	nsName := types.NamespacedName{Namespace: key.Namespace, Name: key.Name}
	ret, ok := r.objects[nsName]
	if !ok {
		return &kerrors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonNotFound}}
	}
	v := reflect.ValueOf(obj)
	if v.Kind() != reflect.Ptr {
		return errors.New("obj must be a pointer")
	}

	v.Elem().Set(reflect.ValueOf(ret).Elem())

	return nil
}
controllers/testresourcereader_test.go (new file, 35 lines)
@@ -0,0 +1,35 @@
package controllers

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func TestResourceReader(t *testing.T) {
	rr := &testResourceReader{
		objects: map[types.NamespacedName]client.Object{
			{Namespace: "default", Name: "sec1"}: &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "default",
					Name:      "sec1",
				},
				Data: map[string][]byte{
					"foo": []byte("bar"),
				},
			},
		},
	}

	var sec corev1.Secret

	err := rr.Get(context.Background(), types.NamespacedName{Namespace: "default", Name: "sec1"}, &sec)
	require.NoError(t, err)

	require.Equal(t, []byte("bar"), sec.Data["foo"])
}
docs/Actions-Runner-Controller-Overview.md (new file, 133 lines)
@@ -0,0 +1,133 @@
## Introduction
This document provides a high-level overview of Actions Runner Controller (ARC). ARC enables running GitHub Actions runners on Kubernetes (K8s) clusters.

This overview gives you a foundation in the basic scenarios and prepares you to review the more advanced topics.

## GitHub Actions
[GitHub Actions](https://github.com/features/actions) is a continuous integration and continuous delivery (CI/CD) platform to automate your build, test, and deployment pipeline.

You can create workflows that build and test every pull request to your repository, or deploy merged pull requests to production. A workflow contains one or more jobs, which can run sequentially or in parallel. Each job runs inside its own runner and has one or more steps that either run a script you define or run an action, a reusable extension that can simplify your workflow. To learn more about Actions, see "[Learn GitHub Actions](https://docs.github.com/en/actions/learn-github-actions)".

## Runners
Runners execute the jobs assigned to them by a GitHub Actions workflow. There are two types of runners:

- [GitHub-hosted runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners) - GitHub provides Linux, Windows, and macOS virtual machines to run your workflows. These virtual machines are hosted in the cloud by GitHub.
- [Self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners) - you can host your own runners in your own data center or cloud infrastructure. ARC deploys self-hosted runners.

## Self-hosted runners
Self-hosted runners offer more control of hardware, operating system, and software tools than GitHub-hosted runners. With self-hosted runners, you can create custom hardware configurations with the processing power or memory to run larger jobs, install software available on your local network, and choose an operating system not offered by GitHub-hosted runners.

### Types of self-hosted runners
Self-hosted runners can be physical, virtual, in a container, on-premises, or in a cloud.
- A traditional deployment is a physical machine with an OS and apps on it. The runner runs on this machine and executes any jobs. It comes with the cost of owning and operating the hardware 24/7, even if it isn't in use the entire time.
- Virtualized deployments are simpler to manage. Each runner runs on a virtual machine (VM) that runs on a host, and multiple such VMs can run on the same host. VMs are complete OSes, though, and can take time to bring up every time a clean environment is needed to run workflows.
- Containerized deployments are similar to VMs, but instead of bringing up an entire VM, a container gets deployed. Kubernetes (K8s) provides a scalable and reproducible environment for containerized workloads. Containers are lightweight, loosely coupled, highly efficient, and can be managed centrally. There are advantages to using Kubernetes (outlined "[here](https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/)"), but it is more complicated and less widely understood than the other options. A managed provider makes this much simpler to run at scale.

*Actions Runner Controller (ARC) makes it simpler to run self-hosted runners in K8s-managed containers.*

## Actions Runner Controller (ARC)
ARC is a K8s controller to create self-hosted runners on your K8s cluster. With a few commands, you can set up self-hosted runners that scale up and down based on demand. And since these runners are ephemeral and container-based, new instances can be brought up rapidly and cleanly.

### Deploying ARC
We have a quick start guide that demonstrates how to easily deploy ARC into your K8s environment. For more details, see "[QuickStart Guide](/README.md#getting-started)."

## ARC components
ARC consists of a set of custom resources. An ARC deployment applies these custom resources onto a K8s cluster. Once applied, it creates a set of pods with the GitHub Actions runner running within them. GitHub is then able to treat these pods as self-hosted runners and allocate jobs to them.

### Custom resources
ARC consists of several custom resource definitions (Runner, Runner Set, Runner Deployment, Runner Replica Set, and Horizontal Runner Autoscaler). For more information on CRDs, see "[Kubernetes Custom Resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)."

The helm command (in the QuickStart guide) installs the custom resources into the actions-runner-system namespace.
```console
helm install -f custom-values.yaml --wait --namespace actions-runner-system \
    --create-namespace actions-runner-controller \
    actions-runner-controller/actions-runner-controller
```

### Runner deployment
Once the custom resources are installed, another command deploys ARC into your K8s cluster.



The `Deployment and Configure ARC` section in the `Quick Start guide` lists the steps to deploy ARC using a `runnerdeployment.yaml` file; here, we explain the details.
For more details, see "[QuickStart Guide](/README.md#getting-started)."

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runnerdeploy
spec:
  replicas: 1
  template:
    spec:
      repository: mumoshu/actions-runner-controller-ci
```

- `kind: RunnerDeployment`: indicates it's a RunnerDeployment custom resource.
- `replicas: 1`: deploys one replica. Multiple replicas can also be deployed (more on that later).
- `repository: mumoshu/actions-runner-controller-ci`: the repository to link to when the pod comes up with the Actions runner. (Note: this can also be configured to link at the enterprise or organization level.)

When this configuration is applied with `kubectl apply -f runnerdeployment.yaml`, ARC creates one pod `example-runnerdeploy-[**]` with two containers, `runner` and `docker`.
The `runner` container has the GitHub runner component installed; the `docker` container has Docker installed.

### The Runner container image
The GitHub-hosted runners include a large amount of pre-installed software. For the complete list, see "[Runner images](https://github.com/actions/virtual-environments/tree/main/images/linux)."

ARC maintains a few runner images, with `latest` aligning with GitHub's Ubuntu version. These images do not contain all of the software installed on the GitHub runners; they contain a subset of the packages from the GitHub runners: basic CLI packages, git, docker, and build-essentials. To install additional software, it is recommended to use the corresponding setup actions, for instance `actions/setup-java` for Java or `actions/setup-node` for Node.

## Executing workflows
Now all the setup and configuration is done. A workflow can be created in the same repository that targets the self-hosted runner created by ARC. The workflow needs to have `runs-on: self-hosted` so it targets the self-hosted pool, as shown in the sketch below. For more information on targeting workflows to run on self-hosted runners, see "[Using self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/using-self-hosted-runners-in-a-workflow)."
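As a minimal sketch, such a workflow could look like the following; the workflow name and job content are hypothetical, and only `runs-on: self-hosted` is essential:

```yaml
# Hypothetical workflow; place it under .github/workflows/ in the linked repository.
name: CI
on: push
jobs:
  build:
    runs-on: self-hosted   # targets the ARC-provided self-hosted pool
    steps:
      - uses: actions/checkout@v3
      - run: echo "Running on an ARC self-hosted runner"
```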
## Scaling runners - statically with replicas count
With a small tweak to the replicas count (for example, `replicas: 2`) in the `runnerdeployment.yaml` file, more runners can be created. One set of pods is created per replica and, as before, each pod contains the two containers. The tweaked deployment is shown below.
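For example, the earlier `runnerdeployment.yaml` scaled to two runners:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runnerdeploy
spec:
  replicas: 2
  template:
    spec:
      repository: mumoshu/actions-runner-controller-ci
```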
## Scaling runners - dynamically with Pull Driven Scaling
ARC also allows for scaling the runners dynamically. There are two mechanisms for dynamic scaling: (1) webhook driven scaling and (2) pull driven scaling. This document describes the pull driven scaling model.



You can enable scaling with 3 steps:
1) Enable `HorizontalRunnerAutoscaler` - create a `deployment.yaml` file of type `HorizontalRunnerAutoscaler`. The schema for this file is defined below.
2) Scaling parameters - `minReplicas` and `maxReplicas` indicate the minimum and maximum number of replicas to scale to.
3) Scaling metrics - ARC currently supports `PercentageRunnersBusy` as a metric type. `PercentageRunnersBusy` polls GitHub for the number of runners in the `busy` state in the RunnerDeployment's namespace, and then scales depending on how you have configured the scale factors.

### Pull Driven Scaling Schema
```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-runner-deployment-autoscaler
spec:
  scaleTargetRef:
    # Your RunnerDeployment Here
    name: example-runnerdeploy
    kind: RunnerDeployment
  minReplicas: 1
  maxReplicas: 5
  metrics:
  - type: PercentageRunnersBusy
    scaleUpThreshold: '0.75'
    scaleDownThreshold: '0.25'
    scaleUpFactor: '2'
    scaleDownFactor: '0.5'
```

For more details, please see "[Pull Driven Scaling](detailed-docs.md#pull-driven-scaling)."

*The period between polls is defined by the controller's `--sync-period` flag. If this flag isn't provided, the controller defaults to a sync period of `1m`; it can be configured in seconds or minutes, for example as shown below.*
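As a sketch, assuming you edit the controller Deployment's container spec directly (chart users should instead look for the corresponding value in their chart version's values.yaml):

```yaml
# Hypothetical excerpt of the controller container spec; only the flag itself
# (--sync-period) is taken from this document, the surrounding layout is assumed.
args:
  - "--sync-period=10m"
```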
## Other Configurations
ARC supports several advanced configurations:
- Support for alternate runners: setting up runner pods with a Docker-in-Docker configuration.
- Managing runner groups: organizing runners into runner groups, making it easy to manage different groups within an enterprise.
- Webhook driven scaling.

Please refer to the documentation in this repo for further details.
docs/detailed-docs.md (new file, 1809 lines)
File diff suppressed because it is too large.
docs/releasenotes/0.26.md (new file, 99 lines)
@@ -0,0 +1,99 @@
# actions-runner-controller v0.26.0

All planned changes in this release can be found in the milestone https://github.com/actions-runner-controller/actions-runner-controller/milestone/9.

Also see https://github.com/actions-runner-controller/actions-runner-controller/compare/v0.24.2...v0.26.0 for the full changelog.

This log documents breaking changes and major enhancements.

## Upgrading

In case you're using our Helm chart to deploy ARC, use chart 0.21.0 or greater. Don't miss upgrading the CRDs as usual! Helm doesn't upgrade CRDs.

## BREAKING CHANGE : Min GHES version is now 3.6

We've bumped the minimum requirement on GHES to [3.6.0](https://docs.github.com/en/enterprise-server@3.6/admin/release-notes#3.6.0), which was released in August. The motivator for us was to use the new `visible_to_repository` option added to the list-runner-groups API for runner-group-visibility-based autoscaling, which is crucial when you have a lot of runner groups with non-distinct sets of labels. If you don't use runner groups at all, ARC may just work, but YMMV.

Relevant PR(s): #158

## ENHANCEMENT : Rootless DinD runners

An awesome GitHub staff member added support for rootless-DinD-powered runners. Compared to standard DinD, rootless DinD gives you an additional layer of security without losing the ability to invoke Docker containers and Docker builds from within your workflow jobs. [If you aren't using the Kubernetes container mode](https://github.com/actions-runner-controller/actions-runner-controller#runner-with-k8s-jobs), you should be using this new rootless DinD.

Rootless DinD is a recent enhancement to Docker that allows you to run the Docker daemon, and therefore Docker containers, without relying on the `root` user. In the context of DinD (Docker-in-Docker) and ARC, this rootless DinD runner still requires a privileged container to function at all. But the Linux user that runs the Docker daemon and the `actions/runner` agent can now be non-root, which is considered more secure than running DinD as root within a privileged container, as a random workflow job is no longer able to run privileged operations.
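As a rough sketch of what using it looks like (the image name and the `dockerdWithinRunnerContainer` field follow the linked documentation, but treat them as assumptions of this sketch and verify them there):

```yaml
# Hypothetical rootless-DinD runner deployment; repository is a placeholder.
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: rootless-dind-runner
spec:
  replicas: 1
  template:
    spec:
      repository: your-org/your-repo
      image: summerwind/actions-runner-dind-rootless
      dockerdWithinRunnerContainer: true
```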
Before using this feature, we highly recommend reading [the detailed explanation in the original pull request](https://github.com/actions-runner-controller/actions-runner-controller/pull/1644) and [the new section in ARC's documentation](https://github.com/actions-runner-controller/actions-runner-controller#runner-with-rootless-dind).

Big kudos to @some-natalie for implementing and contributing this feature!

Relevant PR(s): #1644

## ENHANCEMENT : More granular and real-time runner statuses

We added another controller flag and a Helm chart value to enable the new runner status update hook. Once enabled, it exposes more granular runner phases via the runner status.

Previously, every `Runner` resource managed by `RunnerDeployment` was only able to expose these three phases to e.g. `kubectl get runner` output:

- `Pending` - The runner pod is waiting to be scheduled on any Kubernetes node.
- `Running` - The runner pod has been scheduled onto a node, and its Linux namespaces, containers, and network have been set up. The primary processes of the containers are running.
- `Succeeded` - The primary processes of the pod's containers have stopped with exit status 0.

As you may have realized, this was of limited use, as it was a direct copy of the pod phase and told almost nothing about the runner agent running inside the runner pod or the workflow job that might be running.

Since #1268 though, it can optionally provide two more phases, plus a modified version of the `Running` phase. Once enabled via the controller command-line flag or the Helm chart value, you start to see:

- `Registering` - The runner entrypoint started the runner registration process. Once the registration succeeds, it will update the phase to `Idle`.
- `Idle` - The runner has been registered to GitHub and is still waiting for GitHub to assign a workflow job to run.
- `Running` - GitHub assigned a workflow job and the runner agent started running it.

All three phases should be more useful than before. For example, `Registering` can tell you that the runner is (still) unable to register itself against the GitHub Actions service. If it hangs for minutes in the `Registering` phase, it's very likely you misconfigured your GitHub API credentials or have somehow broken the runner pods so that the runner is unable to register itself. If it's stuck in `Idle` seemingly forever even though you queued some workflow runs and jobs, it's very likely you misconfigured the runner labels or the `on` field of your workflow definitions.
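A sketch of enabling the hook via the Helm chart; the exact value path below is an assumption, not confirmed by this release note, so check your chart version's values.yaml:

```yaml
# Hypothetical values.yaml snippet; the key name is assumed.
runner:
  statusUpdateHook:
    enabled: true
```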
Big kudos to @fgalind1 for implementing and contributing this feature!

Relevant PR(s): #1268

## ENHANCEMENT : More Autoscaling-related metrics

We added several more metrics related to pull-based autoscaling, so that you can scrape them via the [Prometheus exposition format](https://github.com/Showmax/prometheus-docs/blob/master/content/docs/instrumenting/exposition_formats.md) and track and observe the changes in the graphing, dashboarding, and alerting solution of your choice.

For the `PercentageRunnersBusy` metric, we added:

- horizontalrunnerautoscaler_replicas_desired
- horizontalrunnerautoscaler_runners
- horizontalrunnerautoscaler_runners_registered
- horizontalrunnerautoscaler_runners_busy
- horizontalrunnerautoscaler_terminating_busy

For the `TotalNumberOfQueuedAndInProgressWorkflowRuns` metric, we added:

- horizontalrunnerautoscaler_necessary_replicas
- horizontalrunnerautoscaler_workflow_runs_completed
- horizontalrunnerautoscaler_workflow_runs_in_progress
- horizontalrunnerautoscaler_workflow_runs_queued
- horizontalrunnerautoscaler_workflow_runs_unknown
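For instance, a rough sketch of a ratio you could graph or alert on with the metrics above (label matchers omitted; adjust to your scrape configuration):

```
# Fraction of registered runners that are currently busy, per HorizontalRunnerAutoscaler.
horizontalrunnerautoscaler_runners_busy / horizontalrunnerautoscaler_runners_registered
```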
Big kudos to @debugger24 for implementing and contributing this feature!

Relevant PR(s): #1720

## ENHANCEMENT : Improved Multi-tenancy

We had a long-living feature request about reducing the number of ARC instances one needs to maintain to provide self-hosted runners across multiple enterprises and organizations, and here it is. You can now manage as many enterprises and organizations as you need with a single ARC instance.

Previously you had to set up and manage an ARC instance per enterprise, or in many cases per organization, because ARC was able to handle only one set of GitHub API credentials (PAT or GitHub App). The new multitenancy support breaks this limitation by introducing the new `githubAPICredentialsFrom` field to the runner spec. You create a Kubernetes secret containing GitHub API credentials and specify the secret name in `githubAPICredentialsFrom`, so that ARC picks it up and uses it at reconciliation time.
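In sketch form, following the README's Multitenancy section (the organization and secret name below are placeholders, and the field shape should be verified against that guide):

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: org-runnerdeploy
spec:
  template:
    spec:
      organization: your-org
      githubAPICredentialsFrom:
        secretRef:
          name: your-org-github-app   # secret holding the PAT or GitHub App credentials
```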
We've written a detailed guide about this feature in the ["Multitenancy" section of the README](https://github.com/actions-runner-controller/actions-runner-controller#multitenancy). Please read it and give it a try!

Lastly, this feature was stabilized by many early testers from the community. Big thanks and kudos to everyone who participated in testing, especially @Jalmeida1994 and @bm1216 for not only finding bugs but also contributing fixes ([#1725](https://github.com/actions-runner-controller/actions-runner-controller/pull/1725) and [#1781](https://github.com/actions-runner-controller/actions-runner-controller/pull/1781))!

Relevant PR(s): #1268

## ENHANCEMENT : Print ARC version number on startup

Our build script now injects the version number of ARC into the executable and prints it on startup, so that you can see from the logs which version of ARC you're currently running. Previously, when filing a bug report, you had to be extra careful about which version of ARC you were running when you encountered the issue. It's now easier than ever, because you can grab the version number shown in the logs without consulting the container image tag or the chart's appVersion.
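For context, this is the standard Go `-ldflags -X` technique; as a sketch, assuming the `build` package exposes a settable `Version` string variable (consistent with the `build.Version` usage in this release's diff), and with a hypothetical version value:

```console
go build -ldflags "-X github.com/actions-runner-controller/actions-runner-controller/build.Version=v0.26.0" .
```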
In addition to the logs, ARC is enhanced to send an HTTP `User-Agent` header containing the version number for every GitHub Actions API call it makes. You don't usually rely on it, but GitHub and the GitHub Actions backend service can use it to collect metrics about which versions of ARC folks are using.

Big kudos to @ViktorLindgren95 for implementing and contributing this feature!

Relevant PR(s): #1659
docs/releasenotes/app-version-mapping.md (new file, 23 lines)
@@ -0,0 +1,23 @@
# Version mapping

The following table summarizes the version mapping between controller and chart versions:

|Controller (App) Version|Chart Version|
|---|---|
|0.26.0|0.21.0|
|0.25.2|0.20.2|
|0.25.1|0.20.1|
|0.25.0|0.20.0|
|0.24.0|0.19.0|
|0.23.0|0.18.0|
|0.22.3|0.17.3|
|0.22.2|0.17.2|
|0.22.1|0.17.1|
|0.22.0|0.17.0|
|0.21.1|0.16.1|
|0.21.0|0.16.0|
|0.20.4|0.15.3|
|0.20.3|0.15.1/0.15.0|
|0.20.2|0.14.0|
|0.19.0|0.12.0|
|0.18.2|0.11.0|
@@ -47,7 +47,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 	status := req.URL.Query().Get("status")
 	if h.Statuses != nil {
 		if body, ok := h.Statuses[status]; ok {
-			fmt.Fprintf(w, body)
+			fmt.Fprint(w, body)
 			return
 		}
 	}
@@ -69,7 +69,7 @@ func (h *MapHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 		w.WriteHeader(404)
 	} else {
 		w.WriteHeader(h.Status)
-		fmt.Fprintf(w, body)
+		fmt.Fprint(w, body)
 	}
 }
 
@@ -8,7 +8,7 @@ import (
 
 	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
 
-	"github.com/google/go-github/v45/github"
+	"github.com/google/go-github/v47/github"
 	"github.com/gorilla/mux"
 )
 
@@ -10,11 +10,12 @@ import (
 	"sync"
 	"time"
 
+	"github.com/actions-runner-controller/actions-runner-controller/build"
 	"github.com/actions-runner-controller/actions-runner-controller/github/metrics"
 	"github.com/actions-runner-controller/actions-runner-controller/logging"
 	"github.com/bradleyfalzon/ghinstallation/v2"
 	"github.com/go-logr/logr"
-	"github.com/google/go-github/v45/github"
+	"github.com/google/go-github/v47/github"
 	"github.com/gregjones/httpcache"
 	"golang.org/x/oauth2"
 )
@@ -42,6 +43,7 @@ type Client struct {
 	mu sync.Mutex
 	// GithubBaseURL to Github without API suffix.
 	GithubBaseURL string
+	IsEnterprise  bool
 }
 
 type BasicAuthTransport struct {
@@ -94,8 +96,10 @@ func (c *Config) NewClient() (*Client, error) {
 
 	var client *github.Client
 	var githubBaseURL string
+	var isEnterprise bool
 	if len(c.EnterpriseURL) > 0 {
 		var err error
+		isEnterprise = true
 		client, err = github.NewEnterpriseClient(c.EnterpriseURL, c.EnterpriseURL, httpClient)
 		if err != nil {
 			return nil, fmt.Errorf("enterprise client creation failed: %v", err)
@@ -134,14 +138,13 @@ func (c *Config) NewClient() (*Client, error) {
 		}
 	}
 
-	client.UserAgent = "actions-runner-controller"
+	client.UserAgent = "actions-runner-controller/" + build.Version
 
 	return &Client{
 		Client:        client,
 		regTokens:     map[string]*github.RegistrationToken{},
 		mu:            sync.Mutex{},
 		GithubBaseURL: githubBaseURL,
+		IsEnterprise:  isEnterprise,
 	}, nil
 }
 
@@ -243,29 +246,6 @@ func (c *Client) ListRunners(ctx context.Context, enterprise, org, repo string)
 	return runners, nil
 }
 
-// ListOrganizationRunnerGroups returns all the runner groups defined in the organization and
-// inherited to the organization from an enterprise.
-func (c *Client) ListOrganizationRunnerGroups(ctx context.Context, org string) ([]*github.RunnerGroup, error) {
-	var runnerGroups []*github.RunnerGroup
-
-	opts := github.ListOrgRunnerGroupOptions{}
-	opts.PerPage = 100
-	for {
-		list, res, err := c.Client.Actions.ListOrganizationRunnerGroups(ctx, org, &opts)
-		if err != nil {
-			return runnerGroups, fmt.Errorf("failed to list organization runner groups: %w", err)
-		}
-
-		runnerGroups = append(runnerGroups, list.RunnerGroups...)
-		if res.NextPage == 0 {
-			break
-		}
-		opts.Page = res.NextPage
-	}
-
-	return runnerGroups, nil
-}
-
 // ListOrganizationRunnerGroupsForRepository returns all the runner groups defined in the organization and
 // inherited to the organization from an enterprise.
 // We can remove this when google/go-github library is updated to support this.
@@ -439,7 +419,6 @@ func splitOwnerAndRepo(repo string) (string, string, error) {
 	}
 	return chunk[0], chunk[1], nil
 }
 
 func getEnterpriseApiUrl(baseURL string) (string, error) {
 	baseEndpoint, err := url.Parse(baseURL)
 	if err != nil {
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/github/fake"
|
"github.com/actions-runner-controller/actions-runner-controller/github/fake"
|
||||||
"github.com/google/go-github/v45/github"
|
"github.com/google/go-github/v47/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
var server *httptest.Server
|
var server *httptest.Server
|
||||||
@@ -155,7 +155,7 @@ func TestCleanup(t *testing.T) {
|
|||||||
|
|
||||||
func TestUserAgent(t *testing.T) {
|
func TestUserAgent(t *testing.T) {
|
||||||
client := newTestClient()
|
client := newTestClient()
|
||||||
if client.UserAgent != "actions-runner-controller" {
|
if client.UserAgent != "actions-runner-controller/NA" {
|
||||||
t.Errorf("UserAgent should be set to actions-runner-controller")
|
t.Errorf("UserAgent should be set to actions-runner-controller/NA")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
go.mod
@@ -1,54 +1,55 @@
 module github.com/actions-runner-controller/actions-runner-controller
 
-go 1.18
+go 1.19
 
 require (
-	github.com/bradleyfalzon/ghinstallation/v2 v2.0.4
+	github.com/bradleyfalzon/ghinstallation/v2 v2.1.0
 	github.com/davecgh/go-spew v1.1.1
 	github.com/go-logr/logr v1.2.3
-	github.com/google/go-cmp v0.5.8
-	github.com/google/go-github/v45 v45.2.0
+	github.com/google/go-cmp v0.5.9
+	github.com/google/go-github/v47 v47.1.0
 	github.com/gorilla/mux v1.8.0
 	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
 	github.com/kelseyhightower/envconfig v1.4.0
 	github.com/onsi/ginkgo v1.16.5
-	github.com/onsi/gomega v1.19.0
-	github.com/prometheus/client_golang v1.12.2
+	github.com/onsi/gomega v1.22.1
+	github.com/prometheus/client_golang v1.13.0
 	github.com/stretchr/testify v1.8.0
 	github.com/teambition/rrule-go v1.8.0
-	go.uber.org/zap v1.21.0
-	golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0
+	go.uber.org/zap v1.23.0
+	golang.org/x/oauth2 v0.1.0
 	gomodules.xyz/jsonpatch/v2 v2.2.0
-	k8s.io/api v0.24.2
-	k8s.io/apimachinery v0.24.2
-	k8s.io/client-go v0.24.2
-	sigs.k8s.io/controller-runtime v0.12.2
+	k8s.io/api v0.25.3
+	k8s.io/apimachinery v0.25.3
+	k8s.io/client-go v0.25.3
+	sigs.k8s.io/controller-runtime v0.13.0
 	sigs.k8s.io/yaml v1.3.0
 )
 
 require (
-	cloud.google.com/go v0.81.0 // indirect
+	cloud.google.com/go v0.97.0 // indirect
 	github.com/PuerkitoBio/purell v1.1.1 // indirect
 	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/emicklei/go-restful v2.9.5+incompatible // indirect
+	github.com/emicklei/go-restful/v3 v3.8.0 // indirect
 	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
-	github.com/fsnotify/fsnotify v1.5.1 // indirect
-	github.com/go-logr/zapr v1.2.0 // indirect
+	github.com/evanphx/json-patch/v5 v5.6.0 // indirect
+	github.com/fsnotify/fsnotify v1.5.4 // indirect
+	github.com/go-logr/zapr v1.2.3 // indirect
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect
 	github.com/go-openapi/jsonreference v0.19.5 // indirect
 	github.com/go-openapi/swag v0.19.14 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang-jwt/jwt/v4 v4.0.0 // indirect
+	github.com/golang-jwt/jwt/v4 v4.4.1 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/gnostic v0.5.7-v3refs // indirect
-	github.com/google/go-github/v41 v41.0.0 // indirect
+	github.com/google/go-github/v45 v45.2.0 // indirect
 	github.com/google/go-querystring v1.1.0 // indirect
 	github.com/google/gofuzz v1.1.0 // indirect
 	github.com/google/uuid v1.1.2 // indirect
-	github.com/googleapis/gnostic v0.5.5 // indirect
 	github.com/imdario/mergo v0.3.12 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
@@ -61,30 +62,30 @@ require (
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.32.1 // indirect
-	github.com/prometheus/procfs v0.7.3 // indirect
+	github.com/prometheus/common v0.37.0 // indirect
+	github.com/prometheus/procfs v0.8.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	go.uber.org/atomic v1.7.0 // indirect
 	go.uber.org/multierr v1.6.0 // indirect
-	golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
-	golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
-	golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
-	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
-	golang.org/x/text v0.3.7 // indirect
-	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
+	golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect
+	golang.org/x/net v0.1.0 // indirect
+	golang.org/x/sys v0.1.0 // indirect
+	golang.org/x/term v0.1.0 // indirect
+	golang.org/x/text v0.4.0 // indirect
+	golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/protobuf v1.28.0 // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/apiextensions-apiserver v0.24.2 // indirect
-	k8s.io/component-base v0.24.2 // indirect
-	k8s.io/klog/v2 v2.60.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
-	k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
-	sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
+	k8s.io/apiextensions-apiserver v0.25.0 // indirect
+	k8s.io/component-base v0.25.0 // indirect
+	k8s.io/klog/v2 v2.70.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
+	k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect
+	sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
 )
 
 replace github.com/gregjones/httpcache => github.com/actions-runner-controller/httpcache v0.2.0
Some files were not shown because too many files have changed in this diff.