Mirror of https://github.com/actions/actions-runner-controller.git, synced 2025-12-10 19:50:30 +00:00

Compare commits: actions-ru...gha-runner (326 commits)
Commits in this compare (SHA1 only; Author and Date columns are empty in this view):

b6515fe25c 1c7b7f467d 73e22a1756 9b44f0051c 6b4250ca90 ced88228fc 44c06c21ce 4103fe35df
a44fe04bef 274d0c874e 256e08eb45 f677fd5872 d9627141dc 3886f285f8 dab900462b dd8ec1a055
8e52a6d2cf 9990243520 08919814b1 facae69e0b 8f62e35f6b 55951c2bdb c4297d25bb 0774f0680c
92ab11b4d2 7414dc6568 34efb9d585 fbad56197f a5cef7e47b 1f4fe4681e 067686c684 df12e00c9e
cc26593a9b 835eac7835 01e9dd31a9 803818162c 219ba5b477 b09e3a2dc9 7ea60e497c c8918f5a7b
d57d17f161 6e69c75637 882bfab569 3327f620fb 282f2dd09c d67f80863e 4932412cd6 6da1cde09c
f9bae708c2 05a3908ba6 606ed1b28e de244a17be 5be307ec62 ee71ff14bd 9e93c7ee54 bb61bb1342
23fdca4786 211bacaf1e 0324658a3f c4d3cff3df 294bd75cf1 068a427c52 622eaa34f8 619667fc3b
3e88ae2d38 360957cfbc e1fcd63f92 461c016b98 2e406e3aef 044c8ad4d5 eaa451df32 ab04a2b616
d32319be50 057b04763f bc4f4fee12 a6c4d84234 e71c64683b 4aadc7d128 45ebcb1c0a 3ede9b5a01
84104de74b aa6dab5a9a 086f9fd2d6 2407e4f6c6 fc23b7d08e 85ec00a5a5 03d4784518 dcfacf4f1e
02d84575e2 3021be73c7 adb5bc9f66 466be710ee acbce4b70a 418f719bdf 300e93c59d 0285da1a32
b8e5185fef 187479f08c 31244dd61b a8417ec67e 775dc60c94 ecd7531917 ad1989072e fe05987eea
bd392c3665 58d80a7c12 212b9daec3 28ea8d4e7b c1fb793773 63d2cbfdaa 18077a1e83 3ae9f09532
96a930bfd9 877c93c5c3 95c324b550 5e8f576f65 cc15ff0119 8318523627 fcb65b046b 87f566e1e6
a786dae450 666ce8f917 9ba4b6b96a ae86b1a011 154fcde7d0 86d7893d61 8f374d561f 40eec3c783
0c4798b773 7680cfd371 3b1771385f cb288fc99b 7c81c2eec1 0908715786 186c98cf36 d328c61fc3
61d1235d2a 3b36a81db6 fbdfe0df8c 63e8f32281 2ac48038c6 23c8fe4a8b 8505c95719 3de8085b87
828d51baf2 0b0219b88f 362b6c10a3 2110fa5122 9eae9ac75f 9bb416084b fdb049ba1e 5f27548a73
f7fb1490dc 9d48c791f5 371eff09ce cfad7a9b08 6234c568bd 3ae3811944 c74ad6195f 332548093a
b4e143dadc 93c4dd856e 93aea48c38 14b17cca73 5298c6ea29 8fa08d59b1 003c552c34 83370d7f95
7da9d0ae19 b56fa6a748 a22ee8a5f1 e1762ba746 710e2fbc3a 433552770e ba2a32eef6 de0d7ad78c
0382f3bbd5 b6640a033c 998c028d90 f8dffab19d 7ff5b7da8c 6aaff4ecee 437d0173b0 a389292478
6eadb03669 aa60021ab0 50e26bd2f6 2dd13b4a19 35af24cf03 ca96b66fbe 4db5fbc7a1 add83bc7bc
666bba784c 0672ff0ff9 f2f22827a6 a3a801757d 0c003f20d4 863760828a 517fae4119 d9e1e64dc6
3c4ab2d479 3ca96557a6 5fd6ec4bc8 6863bdb208 f3fcb428ae 41bae32a9f e4879e7ae4 e5bb130fda
e7a21cfc53 8f54644b08 c56d6a6c85 a96c3e1102 d29de8d454 12c4d96250 46a13c0626 e32a8054d0
0deb6809b9 21af1ec19d dfadb86d66 c91e76f169 718232f8f4 c7f5f7d161 33bb6902bc aeb0601147
991c0b3211 71da6d5271 e4fd4bc99c d9a8dc7e84 795cf8b1de 0615c2adb1 a918e56ece 546b5251ed
74dda4ea1b b29816290a 921daff61b e233f7ad6a 623c84fa52 d4fb6204cb f8e07c7fe4 f73713859c
e0a7be253e 915739b972 4925880e5e c143fd50b5 dbd668ae2d 5c1be3265b ebcd838501 6ef276b239
f70f325f48 f7c336f9dd ae380f5987 4bf1c12a98 cb561d8db4 eaf6d2f2e2 5ae7ce16e0 bdcde44642
5116e3800e 4e107a4e50 93238697d9 48f62b4c89 ea94b3cc5b 0cac005ab2 55ca7bfdf5 ca97f39fcb
f0c8c07428 e54edea918 e58f82bfce 244e0dd987 02009cef17 2b5af62184 ec58ad19e0 cc9fe33ef5
4a5a85fd61 56b26fd751 36e95dad47 3724b46033 538e2783d7 72ca998266 d439ed5c81 58c2bdf2bb
fe9164b025 06141b39b4 ac4c3fd365 dc29e31bcc 784019f3d7 fc55477c1c 3f78f71137 e511401e51
37aa1a0b8c bea0775bec 79a494b2aa 97404144eb b77489d098 4152afbd30 29f621e1c8 5651ba6ead
759cc4b47f 4ede0c18d0 9091d9b756 a09c2564d9 a555c90fd5 38644cf4e8 23f357db10 584745b67d
df9592dc99 8071ac7066 3c33eca501 aa827474b2 c75c9f9226 c09a04ec01 618276e3d3 18dd89c884
98b17dc0a5 c658dcfa6d c4996d4bbd 7a3fa4f362 1bfd743e69 734f3bd63a 409dc4c114 4b9a6c6700
86e1a4a8f3 544d620bc3 1cfe1974c4 7e4b6ebd6d 11cb9b7882 10b88bf070
.github/ISSUE_TEMPLATE/bug_report.yml (vendored): 36 changed lines
@@ -1,8 +1,18 @@
name: Bug Report
description: File a bug report
title: "Bug"
labels: ["bug"]
title: "<Please write what didn't work for you here>"
labels: ["bug", "needs triage"]
body:
- type: checkboxes
id: read-troubleshooting-guide
attributes:
label: Checks
description: Please check all the boxes below before submitting
options:
- label: I've already read https://github.com/actions/actions-runner-controller/blob/master/TROUBLESHOOTING.md and I'm sure my issue is not covered in the troubleshooting guide.
required: true
- label: I'm not using a custom entrypoint in my runner image
required: true
- type: input
id: controller-version
attributes:
@@ -41,7 +51,7 @@ body:
label: cert-manager installation
description: Confirm that you've installed cert-manager correctly by answering a few questions
placeholder: |
- Did you follow https://github.com/actions-runner-controller/actions-runner-controller#installation? If not, describe the installation process so that we can reproduce your environment.
- Did you follow https://github.com/actions/actions-runner-controller#installation? If not, describe the installation process so that we can reproduce your environment.
- Are you sure you've installed cert-manager from an official source?
(Note that we won't provide user support for cert-manager itself. Make sure cert-manager is fully working before testing ARC or reporting a bug
validations:
@@ -50,16 +60,18 @@ body:
id: checks
attributes:
label: Checks
description: Please check the boxes below before submitting
description: Please check all the boxes below before submitting
options:
- label: This isn't a question or user support case (For Q&A and community support, go to [Discussions](https://github.com/actions-runner-controller/actions-runner-controller/discussions). It might also be a good idea to contract with any of contributors and maintainers if your business is so critical and therefore you need priority support
- label: This isn't a question or user support case (For Q&A and community support, go to [Discussions](https://github.com/actions/actions-runner-controller/discussions). It might also be a good idea to contract with any of contributors and maintainers if your business is so critical and therefore you need priority support
required: true
- label: I've read [releasenotes](https://github.com/actions-runner-controller/actions-runner-controller/tree/master/docs/releasenotes) before submitting this issue and I'm sure it's not due to any recently-introduced backward-incompatible changes
- label: I've read [releasenotes](https://github.com/actions/actions-runner-controller/tree/master/docs/releasenotes) before submitting this issue and I'm sure it's not due to any recently-introduced backward-incompatible changes
required: true
- label: My actions-runner-controller version (v0.x.y) does support the feature
required: true
- label: I've already upgraded ARC (including the CRDs, see charts/actions-runner-controller/docs/UPGRADING.md for details) to the latest and it didn't fix the issue
required: true
- label: I've migrated to the workflow job webhook event (if you using webhook driven scaling)
required: true
- type: textarea
id: resource-definitions
attributes:
@@ -129,8 +141,8 @@ body:
- type: textarea
id: controller-logs
attributes:
label: Controller Logs
description: "NEVER EVER OMIT THIS! Include logs from `actions-runner-controller`'s controller-manager pod"
label: Whole Controller Logs
description: "NEVER EVER OMIT THIS! Include logs from `actions-runner-controller`'s controller-manager pod. Don't omit the parts you think irrelevant!"
render: shell
placeholder: |
PROVIDE THE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA
@@ -149,11 +161,11 @@ body:
- type: textarea
id: runner-pod-logs
attributes:
label: Runner Pod Logs
description: "Include logs from runner pod(s)"
label: Whole Runner Pod Logs
description: "Include logs from runner pod(s). Please don't omit the parts you think irrelevant!"
render: shell
placeholder: |
PROVIDE THE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA
PROVIDE THE WHOLE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA
To grab the runner pod logs:
@@ -165,6 +177,8 @@ body:
kubectl -n $NS logs $POD_NAME -c runner > runnerpod_runner.log
kubectl -n $NS logs $POD_NAME -c docker > runnerpod_docker.log
If any of the containers are getting terminated immediately, try adding `--previous` to the kubectl-logs command to obtain logs emitted before the termination.
validations:
required: true
- type: textarea
.github/ISSUE_TEMPLATE/config.yml (vendored): 11 changed lines
@@ -1,15 +1,14 @@
# Blank issues are mainly for maintainers who are known to write complete issue descriptions without need to following a form
blank_issues_enabled: true
blank_issues_enabled: false
contact_links:
- name: Sponsor ARC Maintainers
about: If your business relies on the continued maintainance of actions-runner-controller, please consider sponsoring the project and the maintainers.
url: https://github.com/actions-runner-controller/actions-runner-controller/tree/master/CODEOWNERS
url: https://github.com/actions/actions-runner-controller/tree/master/CODEOWNERS
- name: Ideas and Feature Requests
about: Wanna request a feature? Create a discussion and collect :+1:s first.
url: https://github.com/actions-runner-controller/actions-runner-controller/discussions/new?category=ideas
url: https://github.com/actions/actions-runner-controller/discussions/new?category=ideas
- name: Questions and User Support
about: Need support using ARC? We use Discussions as the place to provide community support.
url: https://github.com/actions-runner-controller/actions-runner-controller/discussions/new?category=questions
url: https://github.com/actions/actions-runner-controller/discussions/new?category=questions
- name: Need Paid Support?
about: Consider contracting with any of the actions-runner-controller maintainers and contributors.
url: https://github.com/actions-runner-controller/actions-runner-controller/tree/master/CODEOWNERS
url: https://github.com/actions/actions-runner-controller/tree/master/CODEOWNERS
.github/ISSUE_TEMPLATE/feature_request.md (vendored): 20 changed lines
@@ -1,19 +1,21 @@
---
name: Feature request
about: Suggest an idea for this project
labels: ["enhancement", "needs triage"]
title: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
### What would you like added?
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
*A clear and concise description of what you want to happen.*
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
Note: Feature requests to integrate vendor specific cloud tools (e.g. `awscli`, `gcloud-sdk`, `azure-cli`) will likely be rejected as the Runner image aims to be vendor agnostic.
**Additional context**
Add any other context or screenshots about the feature request here.
### Why is this needed?
*A clear and concise description of any alternative solutions or features you've considered.*
### Additional context
*Add any other context or screenshots about the feature request here.*
@@ -14,18 +14,13 @@ inputs:
description: "GHCR password. Usually set from the secrets.GITHUB_TOKEN variable"
required: true
outputs:
sha_short:
description: "The short SHA used for image builds"
value: ${{ steps.vars.outputs.sha_short }}
runs:
using: "composite"
steps:
- name: Get Short SHA
id: vars
run: |
echo ::set-output name=sha_short::${GITHUB_SHA::7}
echo "sha_short=${GITHUB_SHA::7}" >> $GITHUB_ENV
shell: bash
- name: Set up QEMU
@@ -37,14 +32,14 @@ runs:
version: latest
- name: Login to DockerHub
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' }}
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.password != '' }}
uses: docker/login-action@v2
with:
username: ${{ inputs.username }}
password: ${{ inputs.password }}
- name: Login to GitHub Container Registry
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' }}
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.ghcr_password != '' }}
uses: docker/login-action@v2
with:
registry: ghcr.io
.github/dependabot.yml (vendored, new file): 11 lines
@@ -0,0 +1,11 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: "gomod" # See documentation for possible values
directory: "/" # Location of package manifests
schedule:
interval: "weekly"
.github/renovate.json5 (vendored): 6 changed lines
@@ -30,8 +30,10 @@
},
{
"fileMatch": [
"runner/actions-runner.dockerfile",
"runner/actions-runner-dind.dockerfile"
"runner/actions-runner.ubuntu-20.04.dockerfile",
"runner/actions-runner.ubuntu-22.04.dockerfile",
"runner/actions-runner-dind.ubuntu-20.04.dockerfile",
"runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile"
],
"matchStrings": ["RUNNER_VERSION=+(?<currentValue>.*?)\\n"],
"depNameTemplate": "actions/runner",
.github/workflows/e2e-test-dispatch-workflow.yaml (vendored, new file): 16 lines
@@ -0,0 +1,16 @@
name: ARC-REUSABLE-WORKFLOW
on:
workflow_dispatch:
inputs:
date_time:
description: 'Datetime for runner name uniqueness, format: %Y-%m-%d-%H-%M-%S-%3N, example: 2023-02-14-13-00-16-791'
required: true
jobs:
arc-runner-job:
strategy:
fail-fast: false
matrix:
job: [1, 2, 3]
runs-on: arc-runner-${{ inputs.date_time }}
steps:
- run: echo "Hello World!" >> $GITHUB_STEP_SUMMARY
.github/workflows/golangci-lint.yaml (vendored, new file): 23 lines
@@ -0,0 +1,23 @@
name: golangci-lint
on:
push:
branches:
- master
pull_request:
permissions:
contents: read
pull-requests: read
jobs:
golangci:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v3
with:
go-version: 1.19
- uses: actions/checkout@v3
- name: golangci-lint
uses: golangci/golangci-lint-action@v3
with:
only-new-issues: true
version: v1.49.0
.github/workflows/publish-arc.yaml (vendored): 85 changed lines
@@ -1,21 +1,34 @@
name: Publish ARC
# Revert to https://github.com/actions-runner-controller/releases#releases
# for details on why we use this approach
on:
release:
types:
- published
workflow_dispatch:
inputs:
release_tag_name:
description: 'Tag name of the release to publish'
required: true
push_to_registries:
description: 'Push images to registries'
required: true
type: boolean
default: false
# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
permissions:
contents: write
packages: write
env:
TARGET_ORG: actions-runner-controller
TARGET_REPO: actions-runner-controller
jobs:
release-controller:
name: Release
runs-on: ubuntu-latest
env:
DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
steps:
- name: Checkout
uses: actions/checkout@v3
@@ -35,8 +48,14 @@ jobs:
tar zxvf ghr_v0.13.0_linux_amd64.tar.gz
sudo mv ghr_v0.13.0_linux_amd64/ghr /usr/local/bin
- name: Set version
run: echo "VERSION=$(cat ${GITHUB_EVENT_PATH} | jq -r '.release.tag_name')" >> $GITHUB_ENV
- name: Set version env variable
run: |
# Define the release tag name based on the event type
if [[ "${{ github.event_name }}" == "release" ]]; then
echo "VERSION=$(cat ${GITHUB_EVENT_PATH} | jq -r '.release.tag_name')" >> $GITHUB_ENV
elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
echo "VERSION=${{ inputs.release_tag_name }}" >> $GITHUB_ENV
fi
- name: Upload artifacts
env:
@@ -44,27 +63,39 @@ jobs:
run: |
make github-release
- name: Setup Docker Environment
id: vars
uses: ./.github/actions/setup-docker-environment
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
with:
username: ${{ env.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
ghcr_username: ${{ github.actor }}
ghcr_password: ${{ secrets.GITHUB_TOKEN }}
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
organization: ${{ env.TARGET_ORG }}
- name: Build and Push
uses: docker/build-push-action@v3
with:
file: Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:latest
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}-${{ steps.vars.outputs.sha_short }}
ghcr.io/actions-runner-controller/actions-runner-controller:latest
ghcr.io/actions-runner-controller/actions-runner-controller:${{ env.VERSION }}
ghcr.io/actions-runner-controller/actions-runner-controller:${{ env.VERSION }}-${{ steps.vars.outputs.sha_short }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Resolve push to registries
run: |
# Define the push to registries based on the event type
if [[ "${{ github.event_name }}" == "release" ]]; then
echo "PUSH_TO_REGISTRIES=true" >> $GITHUB_ENV
elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
echo "PUSH_TO_REGISTRIES=${{ inputs.push_to_registries }}" >> $GITHUB_ENV
fi
- name: Trigger Build And Push Images To Registries
run: |
# Authenticate
gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}
# Trigger the workflow run
jq -n '{"event_type": "arc", "client_payload": {"release_tag_name": "${{ env.VERSION }}", "push_to_registries": "${{ env.PUSH_TO_REGISTRIES }}" }}' \
| gh api -X POST /repos/actions-runner-controller/releases/dispatches --input -
- name: Job summary
run: |
echo "The [publish-arc](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/publish-arc.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
echo "- Release tag: ${{ env.VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- Push to registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-arc.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-arc.yaml)" >> $GITHUB_STEP_SUMMARY
.github/workflows/publish-arc2.yaml (vendored, new file): 199 lines
@@ -0,0 +1,199 @@
name: Publish ARC 2
on:
workflow_dispatch:
inputs:
ref:
description: 'The branch, tag or SHA to cut a release from'
required: false
type: string
default: ''
release_tag_name:
description: 'The name to tag the controller image with'
required: true
type: string
default: 'canary'
push_to_registries:
description: 'Push images to registries'
required: true
type: boolean
default: false
publish_actions_runner_controller_2_chart:
description: 'Publish new helm chart for actions-runner-controller-2'
required: true
type: boolean
default: false
publish_auto_scaling_runner_set_chart:
description: 'Publish new helm chart for auto-scaling-runner-set'
required: true
type: boolean
default: false
env:
HELM_VERSION: v3.8.0
permissions:
packages: write
jobs:
build-push-image:
name: Build and push controller image
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
- name: Resolve parameters
id: resolve_parameters
run: |
resolvedRef="${{ inputs.ref }}"
if [ -z "$resolvedRef" ]
then
resolvedRef="${{ github.ref }}"
fi
echo "resolved_ref=$resolvedRef" >> $GITHUB_OUTPUT
echo "INFO: Resolving short SHA for $resolvedRef"
echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
echo "INFO: Normalizing repository name (lowercase)"
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 which has a bug causing intermittent
# failures pushing images to GHCR
version: v0.9.1
driver-opts: image=moby/buildkit:v0.10.6
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build & push controller image
uses: docker/build-push-action@v3
with:
file: Dockerfile
platforms: linux/amd64,linux/arm64
build-args: VERSION=${{ inputs.release_tag_name }}
push: ${{ inputs.push_to_registries }}
tags: |
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-2:${{ inputs.release_tag_name }}
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-2:${{ inputs.release_tag_name }}-${{ steps.resolve_parameters.outputs.short_sha }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Job summary
run: |
echo "The [publish-arc2](https://github.com/actions/actions-runner-controller/blob/main/.github/workflows/publish-arc2.yaml) workflow run was completed successfully!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
echo "- Release tag: ${{ inputs.release_tag_name }}" >> $GITHUB_STEP_SUMMARY
echo "- Push to registries: ${{ inputs.push_to_registries }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
publish-helm-chart-arc-2:
if: ${{ inputs.publish_actions_runner_controller_2_chart == true }}
needs: build-push-image
name: Publish Helm chart for actions-runner-controller-2
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
- name: Resolve parameters
id: resolve_parameters
run: |
resolvedRef="${{ inputs.ref }}"
if [ -z "$resolvedRef" ]
then
resolvedRef="${{ github.ref }}"
fi
echo "INFO: Resolving short SHA for $resolvedRef"
echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
echo "INFO: Normalizing repository name (lowercase)"
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up Helm
uses: azure/setup-helm@v3.3
with:
version: ${{ env.HELM_VERSION }}
- name: Publish new helm chart for actions-runner-controller-2
run: |
echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin
ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG=$(cat charts/actions-runner-controller-2/Chart.yaml | grep version: | cut -d " " -f 2)
echo "ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG=${ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG}" >> $GITHUB_ENV
helm package charts/actions-runner-controller-2/ --version="${ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG}"
helm push actions-runner-controller-2-"${ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts
- name: Job summary
run: |
echo "New helm chart for actions-runner-controller-2 published successfully!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
echo "- Actions-Runner-Controller-2 Chart version: ${{ env.ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY
publish-helm-chart-auto-scaling-runner-set:
if: ${{ inputs.publish_auto_scaling_runner_set_chart == true }}
needs: build-push-image
name: Publish Helm chart for auto-scaling-runner-set
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
- name: Resolve parameters
id: resolve_parameters
run: |
resolvedRef="${{ inputs.ref }}"
if [ -z "$resolvedRef" ]
then
resolvedRef="${{ github.ref }}"
fi
echo "INFO: Resolving short SHA for $resolvedRef"
echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
echo "INFO: Normalizing repository name (lowercase)"
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up Helm
uses: azure/setup-helm@v3.3
with:
version: ${{ env.HELM_VERSION }}
- name: Publish new helm chart for auto-scaling-runner-set
run: |
echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin
AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG=$(cat charts/auto-scaling-runner-set/Chart.yaml | grep version: | cut -d " " -f 2)
echo "AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG=${AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG}" >> $GITHUB_ENV
helm package charts/auto-scaling-runner-set/ --version="${AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG}"
helm push auto-scaling-runner-set-"${AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts
- name: Job summary
run: |
echo "New helm chart for auto-scaling-runner-set published successfully!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
echo "- Auto-Scaling-Runner-Set Chart version: ${{ env.AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY
.github/workflows/publish-canary.yaml (vendored): 56 changed lines
@@ -1,5 +1,7 @@
name: Publish Canary Image
# Revert to https://github.com/actions-runner-controller/releases#releases
# for details on why we use this approach
on:
push:
branches:
@@ -19,40 +21,50 @@ on:
- 'LICENSE'
- 'Makefile'
env:
# Safeguard to prevent pushing images to registeries after build
PUSH_TO_REGISTRIES: true
TARGET_ORG: actions-runner-controller
TARGET_REPO: actions-runner-controller
# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
permissions:
contents: read
packages: write
jobs:
canary-build:
name: Build and Publish Canary Image
runs-on: ubuntu-latest
env:
DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Setup Docker Environment
id: vars
uses: ./.github/actions/setup-docker-environment
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
with:
username: ${{ env.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
ghcr_username: ${{ github.actor }}
ghcr_password: ${{ secrets.GITHUB_TOKEN }}
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
organization: ${{ env.TARGET_ORG }}
# Considered unstable builds
# See Issue #285, PR #286, and PR #323 for more information
- name: Build and Push
uses: docker/build-push-action@v3
with:
file: Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:canary
ghcr.io/actions-runner-controller/actions-runner-controller:canary
cache-from: type=gha,scope=arc-canary
cache-to: type=gha,mode=max,scope=arc-canary
- name: Trigger Build And Push Images To Registries
run: |
# Authenticate
gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}
# Trigger the workflow run
jq -n '{"event_type": "canary", "client_payload": {"sha": "${{ github.sha }}", "push_to_registries": ${{ env.PUSH_TO_REGISTRIES }}}}' \
| gh api -X POST /repos/actions-runner-controller/releases/dispatches --input -
- name: Job summary
run: |
echo "The [publish-canary](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/publish-canary.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
echo "- Push to registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml)" >> $GITHUB_STEP_SUMMARY
.github/workflows/publish-chart.yaml (vendored): 106 changed lines
@@ -1,5 +1,7 @@
name: Publish Helm Chart
# Revert to https://github.com/actions-runner-controller/releases#releases
# for details on why we use this approach
on:
push:
branches:
@@ -8,6 +10,8 @@ on:
- 'charts/**'
- '.github/workflows/publish-chart.yaml'
- '!charts/actions-runner-controller/docs/**'
- '!charts/actions-runner-controller-2/**'
- '!charts/auto-scaling-runner-set/**'
- '!**.md'
workflow_dispatch:
@@ -31,7 +35,7 @@ jobs:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v3.0
uses: azure/setup-helm@v3.4
with:
version: ${{ env.HELM_VERSION }}
@@ -57,7 +61,7 @@ jobs:
python-version: '3.7'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.2.1
uses: helm/chart-testing-action@v2.3.1
- name: Run chart-testing (list-changed)
id: list-changed
@@ -73,7 +77,7 @@ jobs:
- name: Create kind cluster
if: steps.list-changed.outputs.changed == 'true'
uses: helm/kind-action@v1.3.0
uses: helm/kind-action@v1.4.0
# We need cert-manager already installed in the cluster because we assume the CRDs exist
- name: Install cert-manager
@@ -86,20 +90,31 @@ jobs:
if: steps.list-changed.outputs.changed == 'true'
run: ct install --config charts/.ci/ct-config.yaml
# WARNING: This relies on the latest release being inat the top of the JSON from GitHub and a clean chart.yaml
# WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
- name: Check if Chart Publish is Needed
id: publish-chart-step
run: |
CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/actions-runner-controller/actions-runner-controller/master/charts/actions-runner-controller/Chart.yaml)
CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
RELEASE_LIST=$(curl -fs https://api.github.com/repos/actions-runner-controller/actions-runner-controller/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)
echo "Chart version in master : $NEW_CHART_VERSION"
echo "Latest release chart version : $LATEST_RELEASED_CHART_VERSION"
echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION ]]; then
echo "::set-output name=publish::true"
echo "publish=true" >> $GITHUB_OUTPUT
else
echo "publish=false" >> $GITHUB_OUTPUT
fi
- name: Job summary
run: |
echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY
publish-chart:
if: needs.lint-chart.outputs.publish-chart == 'true'
needs: lint-chart
@@ -107,7 +122,10 @@ jobs:
runs-on: ubuntu-latest
permissions:
contents: write # for helm/chart-releaser-action to push chart release and create a release
env:
CHART_TARGET_ORG: actions-runner-controller
CHART_TARGET_REPO: actions-runner-controller.github.io
CHART_TARGET_BRANCH: master
steps:
- name: Checkout
@@ -120,8 +138,68 @@ jobs:
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.4.0
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
organization: ${{ env.CHART_TARGET_ORG }}
- name: Install chart-releaser
uses: helm/chart-releaser-action@v1.4.1
with:
install_only: true
install_dir: ${{ github.workspace }}/bin
- name: Package and upload release assets
run: |
cr package \
${{ github.workspace }}/charts/actions-runner-controller/ \
--package-path .cr-release-packages
cr upload \
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
--package-path .cr-release-packages \
--token ${{ secrets.GITHUB_TOKEN }}
- name: Generate updated index.yaml
run: |
cr index \
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
--index-path ${{ github.workspace }}/index.yaml \
--pages-branch 'gh-pages' \
--pages-index-path 'index.yaml'
# Chart Release was never intended to publish to a different repo
# this workaround is intended to move the index.yaml to the target repo
# where the github pages are hosted
- name: Checkout pages repository
uses: actions/checkout@v3
with:
repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
path: ${{ env.CHART_TARGET_REPO }}
ref: ${{ env.CHART_TARGET_BRANCH }}
token: ${{ steps.get_workflow_token.outputs.token }}
- name: Copy index.yaml
run: |
cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml
- name: Commit and push
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
git add .
git commit -m "Update index.yaml"
git push
working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}
- name: Job summary
run: |
echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/main/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY
.github/workflows/release-runners.yaml (vendored, new file): 72 lines
@@ -0,0 +1,72 @@
name: Runners
# Revert to https://github.com/actions-runner-controller/releases#releases
# for details on why we use this approach
on:
# We must do a trigger on a push: instead of a types: closed so GitHub Secrets
# are available to the workflow run
push:
branches:
- 'master'
paths:
- 'runner/VERSION'
- '.github/workflows/release-runners.yaml'
env:
# Safeguard to prevent pushing images to registeries after build
PUSH_TO_REGISTRIES: true
TARGET_ORG: actions-runner-controller
TARGET_WORKFLOW: release-runners.yaml
DOCKER_VERSION: 20.10.21
RUNNER_CONTAINER_HOOKS_VERSION: 0.2.0
jobs:
build-runners:
name: Trigger Build and Push of Runner Images
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Get runner version
id: runner_version
run: |
version=$(echo -n $(cat runner/VERSION))
echo runner_version=$version >> $GITHUB_OUTPUT
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
organization: ${{ env.TARGET_ORG }}
- name: Trigger Build And Push Runner Images To Registries
env:
RUNNER_VERSION: ${{ steps.runner_version.outputs.runner_version }}
run: |
# Authenticate
gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}
# Trigger the workflow run
gh workflow run ${{ env.TARGET_WORKFLOW }} -R ${{ env.TARGET_ORG }}/releases \
-f runner_version=${{ env.RUNNER_VERSION }} \
-f docker_version=${{ env.DOCKER_VERSION }} \
-f runner_container_hooks_version=${{ env.RUNNER_CONTAINER_HOOKS_VERSION }} \
-f sha='${{ github.sha }}' \
-f push_to_registries=${{ env.PUSH_TO_REGISTRIES }}
- name: Job summary
env:
RUNNER_VERSION: ${{ steps.runner_version.outputs.runner_version }}
run: |
echo "The [release-runners.yaml](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/release-runners.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
echo "- runner_version: ${{ env.RUNNER_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- docker_version: ${{ env.DOCKER_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- runner_container_hooks_version: ${{ env.RUNNER_CONTAINER_HOOKS_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
echo "- push_to_registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "[https://github.com/actions-runner-controller/releases/actions/workflows/release-runners.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/release-runners.yaml)" >> $GITHUB_STEP_SUMMARY
.github/workflows/run-first-interaction.yaml (vendored, new file): 29 lines
@@ -0,0 +1,29 @@
name: first-interaction
on:
issues:
types: [opened]
pull_request:
branches: [master]
types: [opened]
jobs:
check_for_first_interaction:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/first-interaction@main
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
issue-message: |
Hello! Thank you for filing an issue.
The maintainers will triage your issue shortly.
In the meantime, please take a look at the [troubleshooting guide](https://github.com/actions/actions-runner-controller/blob/master/TROUBLESHOOTING.md) for bug reports.
If this is a feature request, please review our [contribution guidelines](https://github.com/actions/actions-runner-controller/blob/master/CONTRIBUTING.md).
pr-message: |
Hello! Thank you for your contribution.
Please review our [contribution guidelines](https://github.com/actions/actions-runner-controller/blob/master/CONTRIBUTING.md) to understand the project's testing and code conventions.
.github/workflows/run-stale.yaml (vendored): 2 changed lines
@@ -14,7 +14,7 @@ jobs:
issues: write # for actions/stale to close stale issues
pull-requests: write # for actions/stale to close stale PRs
steps:
- uses: actions/stale@v5
- uses: actions/stale@v6
with:
stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
# turn off stale for both issues and PRs
.github/workflows/runners.yaml (vendored): 83 changed lines
@@ -1,83 +0,0 @@
name: Runners
on:
pull_request:
types:
- opened
- synchronize
- reopened
branches:
- 'master'
paths:
- 'runner/**'
- '!runner/Makefile'
- '.github/workflows/runners.yaml'
- '!**.md'
# We must do a trigger on a push: instead of a types: closed so GitHub Secrets
# are available to the workflow run
push:
branches:
- 'master'
paths:
- 'runner/**'
- '!runner/Makefile'
- '.github/workflows/runners.yaml'
- '!**.md'
env:
RUNNER_VERSION: 2.294.0
DOCKER_VERSION: 20.10.12
RUNNER_CONTAINER_HOOKS_VERSION: 0.1.2
DOCKERHUB_USERNAME: summerwind
jobs:
build-runners:
name: Build ${{ matrix.name }}-${{ matrix.os-name }}-${{ matrix.os-version }}
runs-on: ubuntu-latest
permissions:
packages: write
contents: read
strategy:
fail-fast: false
matrix:
include:
- name: actions-runner
os-name: ubuntu
os-version: 20.04
- name: actions-runner-dind
os-name: ubuntu
os-version: 20.04
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Setup Docker Environment
id: vars
uses: ./.github/actions/setup-docker-environment
with:
username: ${{ env.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
ghcr_username: ${{ github.actor }}
ghcr_password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and Push Versioned Tags
uses: docker/build-push-action@v3
with:
context: ./runner
file: ./runner/${{ matrix.name }}.dockerfile
platforms: linux/amd64,linux/arm64
push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }}
build-args: |
RUNNER_VERSION=${{ env.RUNNER_VERSION }}
DOCKER_VERSION=${{ env.DOCKER_VERSION }}
RUNNER_CONTAINER_HOOKS_VERSION=${{ env.RUNNER_CONTAINER_HOOKS_VERSION }}
tags: |
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}-${{ steps.vars.outputs.sha_short }}
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:latest
ghcr.io/${{ github.repository }}/${{ matrix.name }}:latest
ghcr.io/${{ github.repository }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}
ghcr.io/${{ github.repository }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}-${{ steps.vars.outputs.sha_short }}
cache-from: type=gha,scope=build-${{ matrix.name }}
cache-to: type=gha,mode=max,scope=build-${{ matrix.name }}
.github/workflows/update-runners.yaml (vendored, new file): 107 lines
@@ -0,0 +1,107 @@
# This workflows polls releases from actions/runner and in case of a new one it
# updates files containing runner version and opens a pull request.
name: Update runners
on:
schedule:
# run daily
- cron: "0 9 * * *"
workflow_dispatch:
jobs:
# check_versions compares our current version and the latest available runner
# version and sets them as outputs.
check_versions:
runs-on: ubuntu-latest
env:
GH_TOKEN: ${{ github.token }}
outputs:
current_version: ${{ steps.versions.outputs.current_version }}
latest_version: ${{ steps.versions.outputs.latest_version }}
steps:
- uses: actions/checkout@v3
- name: Get current and latest versions
id: versions
run: |
CURRENT_VERSION=$(echo -n $(cat runner/VERSION))
echo "Current version: $CURRENT_VERSION"
echo current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT
LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner | grep -oP '(?<=v)[0-9.]+' | head -1)
echo "Latest version: $LATEST_VERSION"
echo latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT
# check_pr checks if a PR for the same update already exists. It only runs if
# runner latest version != our current version. If no existing PR is found,
# it sets a PR name as output.
check_pr:
runs-on: ubuntu-latest
needs: check_versions
if: needs.check_versions.outputs.current_version != needs.check_versions.outputs.latest_version
outputs:
pr_name: ${{ steps.pr_name.outputs.pr_name }}
env:
GH_TOKEN: ${{ github.token }}
steps:
- name: debug
run:
echo ${{ needs.check_versions.outputs.current_version }}
echo ${{ needs.check_versions.outputs.latest_version }}
- uses: actions/checkout@v3
- name: PR Name
id: pr_name
env:
LATEST_VERSION: ${{ needs.check_versions.outputs.latest_version }}
run: |
PR_NAME="Update runner to version ${LATEST_VERSION}"
result=$(gh pr list --search "$PR_NAME" --json number --jq ".[].number" --limit 1)
if [ -z "$result" ]
then
echo "No existing PRs found, setting output with pr_name=$PR_NAME"
echo pr_name=$PR_NAME >> $GITHUB_OUTPUT
else
echo "Found a PR with title '$PR_NAME' already existing: ${{ github.server_url }}/${{ github.repository }}/pull/$result"
fi
# update_version updates runner version in the files listed below, commits
# the changes and opens a pull request as `github-actions` bot.
update_version:
runs-on: ubuntu-latest
needs:
- check_versions
- check_pr
if: needs.check_pr.outputs.pr_name
permissions:
pull-requests: write
contents: write
env:
GH_TOKEN: ${{ github.token }}
CURRENT_VERSION: ${{ needs.check_versions.outputs.current_version }}
LATEST_VERSION: ${{ needs.check_versions.outputs.latest_version }}
PR_NAME: ${{ needs.check_pr.outputs.pr_name }}
steps:
- uses: actions/checkout@v3
- name: New branch
run: git checkout -b update-runner-$LATEST_VERSION
- name: Update files
run: |
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go
- name: Commit changes
run: |
# from https://github.com/orgs/community/discussions/26560
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
git config user.name "github-actions[bot]"
git add .
git commit -m "$PR_NAME"
git push -u origin HEAD
- name: Create pull request
run: gh pr create -f
2 .github/workflows/validate-arc.yaml vendored
@@ -34,7 +34,7 @@ jobs:
- name: Set-up Go
uses: actions/setup-go@v3
with:
go-version: '1.18.2'
go-version: '1.19'
check-latest: false

- uses: actions/cache@v3
7 .github/workflows/validate-chart.yaml vendored
@@ -26,7 +26,7 @@ jobs:
fetch-depth: 0

- name: Set up Helm
uses: azure/setup-helm@v3.0
uses: azure/setup-helm@v3.4
with:
version: ${{ env.HELM_VERSION }}

@@ -52,7 +52,7 @@ jobs:
python-version: '3.7'

- name: Set up chart-testing
uses: helm/chart-testing-action@v2.2.1
uses: helm/chart-testing-action@v2.3.1

- name: Run chart-testing (list-changed)
id: list-changed
@@ -67,7 +67,7 @@ jobs:
ct lint --config charts/.ci/ct-config.yaml

- name: Create kind cluster
uses: helm/kind-action@v1.3.0
uses: helm/kind-action@v1.4.0
if: steps.list-changed.outputs.changed == 'true'

# We need cert-manager already installed in the cluster because we assume the CRDs exist
@@ -78,5 +78,6 @@ jobs:
helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait

- name: Run chart-testing (install)
if: steps.list-changed.outputs.changed == 'true'
run: |
ct install --config charts/.ci/ct-config.yaml
24 .github/workflows/validate-runners.yaml vendored
@@ -6,13 +6,33 @@ on:
- '**'
paths:
- 'runner/**'
- 'test/entrypoint/**'
- 'test/startup/**'
- '!**.md'

permissions:
contents: read

jobs:
shellcheck:
name: runner / shellcheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: shellcheck
uses: reviewdog/action-shellcheck@v1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
path: "./runner"
pattern: |
*.sh
*.bash
update-status
# Make this consistent with `make shellcheck`
shellcheck_flags: "--shell bash --source-path runner"
exclude: "./.git/*"
check_all_files_with_shebangs: "false"
# Set this to "true" once we addressed all the shellcheck findings
fail_on_error: "false"
test-runner-entrypoint:
name: Test entrypoint
runs-on: ubuntu-latest
@@ -22,4 +42,4 @@ jobs:

- name: Run tests
run: |
make acceptance/runner/entrypoint
make acceptance/runner/startup
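The reviewdog step above does not fail the build yet, so it is worth running the same checks locally before pushing. A minimal sketch, assuming `shellcheck` is installed; the repository's `make shellcheck` target wraps an equivalent invocation with a pinned binary downloaded into `.tools/`.

```shell
# Mirror the workflow's shellcheck_flags locally.
shellcheck --shell bash --source-path runner runner/*.sh
# Or use the Makefile wrapper, which downloads a pinned shellcheck into .tools/:
make shellcheck
```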
1 .gitignore vendored
@@ -29,6 +29,7 @@ bin
.env
.test.env
*.pem
!github/actions/testdata/*.pem

# OS
.DS_STORE
17 .golangci.yaml Normal file
@@ -0,0 +1,17 @@
run:
  timeout: 3m
output:
  format: github-actions
linters-settings:
  errcheck:
    exclude-functions:
      - (net/http.ResponseWriter).Write
      - (*net/http.Server).Shutdown
      - (*github.com/actions/actions-runner-controller/simulator.VisibleRunnerGroups).Add
      - (*github.com/actions/actions-runner-controller/testing.Kind).Stop
issues:
  exclude-rules:
    - path: controllers/suite_test.go
      linters:
        - staticcheck
      text: "SA1019"
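To run the linter with this configuration locally, a container invocation like the one pinned in the repository's `make lint` target works; the exact image tag below follows that target and may move over time.

```shell
# Run golangci-lint with the repo's .golangci.yaml (same container as `make lint`).
docker run --rm -v "$(pwd)":/app -w /app golangci/golangci-lint:v1.49.0 golangci-lint run
```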
@@ -1,2 +1,2 @@
# actions-runner-controller maintainers
* @mumoshu @toast-gear
* @mumoshu @toast-gear @actions/actions-runtime @nikola-jokic
74 CODE_OF_CONDUCT.md Normal file
@@ -0,0 +1,74 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||
nationality, personal appearance, race, religion, or sexual identity and
|
||||
orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at opensource@github.com. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at [http://contributor-covenant.org/version/1/4][version]
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/4/
|
||||
222 CONTRIBUTING.md
@@ -1,85 +1,53 @@
|
||||
## Contributing
|
||||
# Contribution Guide
|
||||
|
||||
### Testing Controller Built from a Pull Request
|
||||
- [Contribution Guide](#contribution-guide)
|
||||
- [Welcome](#welcome)
|
||||
- [Before contributing code](#before-contributing-code)
|
||||
- [How to Contribute a Patch](#how-to-contribute-a-patch)
|
||||
- [Developing the Controller](#developing-the-controller)
|
||||
- [Developing the Runners](#developing-the-runners)
|
||||
- [Tests](#tests)
|
||||
- [Running Ginkgo Tests](#running-ginkgo-tests)
|
||||
- [Running End to End Tests](#running-end-to-end-tests)
|
||||
- [Rerunning a failed test](#rerunning-a-failed-test)
|
||||
- [Testing in a non-kind cluster](#testing-in-a-non-kind-cluster)
|
||||
- [Code conventions](#code-conventions)
|
||||
- [Opening the Pull Request](#opening-the-pull-request)
|
||||
- [Helm Version Changes](#helm-version-changes)
|
||||
- [Testing Controller Built from a Pull Request](#testing-controller-built-from-a-pull-request)
|
||||
|
||||
We always appreciate your help in testing open pull requests by deploying custom builds of actions-runner-controller onto your own environment, so that we are extra sure we didn't break anything.
|
||||
## Welcome
|
||||
|
||||
It is especially true when the pull request is about GitHub Enterprise, both GHEC and GHES, as [maintainers don't have GitHub Enterprise environments for testing](/README.md#github-enterprise-support).
|
||||
This document is the single source of truth for how to contribute to the code base.
|
||||
Feel free to browse the [open issues](https://github.com/actions/actions-runner-controller/issues) or file a new one, all feedback is welcome!
|
||||
By reading this guide, we hope to give you all of the information you need to be able to pick up issues, contribute new features, and get your work
|
||||
reviewed and merged.
|
||||
|
||||
The process would look like the below:
|
||||
## Before contributing code
|
||||
|
||||
- Clone this repository locally
|
||||
- Checkout the branch. If you use the `gh` command, run `gh pr checkout $PR_NUMBER`
|
||||
- Run `NAME=$DOCKER_USER/actions-runner-controller VERSION=canary make docker-build docker-push` for a custom container image build
|
||||
- Update your actions-runner-controller's controller-manager deployment to use the new image, `$DOCKER_USER/actions-runner-controller:canary`
|
||||
We welcome code patches, but to make sure things are well coordinated you should discuss any significant change before starting the work.
|
||||
The maintainers ask that you signal your intention to contribute to the project using the issue tracker.
|
||||
If there is an existing issue that you want to work on, please let us know so we can get it assigned to you.
|
||||
If you noticed a bug or want to add a new feature, there are issue templates you can fill out.
|
||||
|
||||
Please also note that you need to replace `$DOCKER_USER` with your own DockerHub account name.
|
||||
When filing a feature request, the maintainers will review the change and give you a decision on whether we are willing to accept the feature into the project.
|
||||
For significantly large and/or complex features, we may request that you write up an architectural decision record ([ADR](https://github.blog/2020-08-13-why-write-adrs/)) detailing the change.
|
||||
Please use the [template](/adrs/0000-TEMPLATE.md) as guidance.
|
||||
|
||||
### How to Contribute a Patch
|
||||
<!--
|
||||
TODO: Add a pre-requisite section describing what developers should
|
||||
install in order get started on ARC.
|
||||
-->
|
||||
|
||||
Depending on what you are patching depends on how you should go about it. Below are some guides on how to test patches locally as well as develop the controller and runners.
|
||||
## How to Contribute a Patch
|
||||
|
||||
When submitting a PR for a change please provide evidence that your change works as we still need to work on improving the CI of the project. Some resources are provided for helping achieve this, see this guide for details.
|
||||
How you should go about a patch depends on what you are patching.
|
||||
Below are some guides on how to test patches locally as well as develop the controller and runners.
|
||||
|
||||
#### Running an End to End Test
|
||||
When submitting a PR for a change please provide evidence that your change works as we still need to work on improving the CI of the project.
|
||||
Some resources are provided for helping achieve this, see this guide for details.
|
||||
|
||||
> **Notes for Ubuntu 20.04+ users**
|
||||
>
|
||||
> If you're using Ubuntu 20.04 or greater, you might have installed `docker` with `snap`.
|
||||
>
|
||||
> If you want to stick with `snap`-provided `docker`, do not forget to set `TMPDIR` to
|
||||
> somewhere under `$HOME`.
|
||||
> Otherwise `kind load docker-image` fails while running `docker save`.
|
||||
> See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap for more information.
|
||||
|
||||
To test your local changes against both PAT and App based authentication please run the `acceptance` make target with the authentication configuration details provided:
|
||||
|
||||
```shell
|
||||
# This sets `VERSION` envvar to some appropriate value
|
||||
. hack/make-env.sh
|
||||
|
||||
DOCKER_USER=*** \
|
||||
GITHUB_TOKEN=*** \
|
||||
APP_ID=*** \
|
||||
PRIVATE_KEY_FILE_PATH=path/to/pem/file \
|
||||
INSTALLATION_ID=*** \
|
||||
make acceptance
|
||||
```
|
||||
|
||||
**Rerunning a failed test**
|
||||
|
||||
When one of the tests run by `make acceptance` fails, you'll probably want to rerun only the failed one.
|
||||
|
||||
It can be done by `make acceptance/run` and by setting the combination of `ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm|kubectl` and `ACCEPTANCE_TEST_SECRET_TYPE=token|app` values that failed (note, you just need to set the corresponding authentication configuration in this circumstance)
|
||||
|
||||
In the example below, we rerun the test for the combination `ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token` only:
|
||||
|
||||
```shell
|
||||
DOCKER_USER=*** \
|
||||
GITHUB_TOKEN=*** \
|
||||
ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm
|
||||
ACCEPTANCE_TEST_SECRET_TYPE=token \
|
||||
make acceptance/run
|
||||
```
|
||||
|
||||
**Testing in a non-kind cluster**
|
||||
|
||||
If you prefer to test in a non-kind cluster, you can instead run:
|
||||
|
||||
```shell
|
||||
KUBECONFIG=path/to/kubeconfig \
|
||||
DOCKER_USER=*** \
|
||||
GITHUB_TOKEN=*** \
|
||||
APP_ID=*** \
|
||||
PRIVATE_KEY_FILE_PATH=path/to/pem/file \
|
||||
INSTALLATION_ID=*** \
|
||||
ACCEPTANCE_TEST_SECRET_TYPE=token \
|
||||
make docker-build acceptance/setup \
|
||||
acceptance/deploy \
|
||||
acceptance/tests
|
||||
```
|
||||
|
||||
#### Developing the Controller
|
||||
### Developing the Controller
|
||||
|
||||
Rerunning the whole acceptance test suite from scratch on every little change to the controller, the runner, and the chart would be counter-productive.
|
||||
|
||||
@@ -119,13 +87,14 @@ NAME=$DOCKER_USER/actions-runner make \
|
||||
(kubectl get po -ojsonpath={.items[*].metadata.name} | xargs -n1 kubectl delete po)
|
||||
```
|
||||
|
||||
#### Developing the Runners
|
||||
### Developing the Runners
|
||||
|
||||
**Tests**
|
||||
#### Tests
|
||||
|
||||
A set of example pipelines (./acceptance/pipelines) are provided in this repository which you can use to validate your runners are working as expected. When raising a PR please run the relevant suites to prove your change hasn't broken anything.
|
||||
A set of example pipelines (./acceptance/pipelines) are provided in this repository which you can use to validate your runners are working as expected.
|
||||
When raising a PR please run the relevant suites to prove your change hasn't broken anything.
|
||||
|
||||
**Running Ginkgo Tests**
|
||||
#### Running Ginkgo Tests
|
||||
|
||||
You can run the integration test suite that is written in Ginkgo with:
|
||||
|
||||
@@ -135,13 +104,14 @@ make test-with-deps
|
||||
|
||||
This will first install a few binaries required to set up the integration test environment and then run `go test` to start the Ginkgo tests.
|
||||
|
||||
If you don't want to use `make`, like when you're running tests from your IDE, install required binaries to `/usr/local/kubebuilder/bin`. That's the directory in which controller-runtime's `envtest` framework locates the binaries.
|
||||
If you don't want to use `make`, like when you're running tests from your IDE, install required binaries to `/usr/local/kubebuilder/bin`.
|
||||
That's the directory in which controller-runtime's `envtest` framework locates the binaries.
|
||||
|
||||
```shell
|
||||
sudo mkdir -p /usr/local/kubebuilder/bin
|
||||
make kube-apiserver etcd
|
||||
sudo mv test-assets/{etcd,kube-apiserver} /usr/local/kubebuilder/bin/
|
||||
go test -v -run TestAPIs github.com/actions-runner-controller/actions-runner-controller/controllers
|
||||
go test -v -run TestAPIs github.com/actions/actions-runner-controller/controllers/actions.summerwind.net
|
||||
```
|
||||
|
||||
To run Ginkgo tests selectively, set the pattern of target test names to `GINKGO_FOCUS`.
|
||||
@@ -149,9 +119,101 @@ All the Ginkgo test that matches `GINKGO_FOCUS` will be run.
|
||||
|
||||
```shell
|
||||
GINKGO_FOCUS='[It] should create a new Runner resource from the specified template, add a another Runner on replicas increased, and removes all the replicas when set to 0' \
|
||||
go test -v -run TestAPIs github.com/actions-runner-controller/actions-runner-controller/controllers
|
||||
go test -v -run TestAPIs github.com/actions/actions-runner-controller/controllers/actions.summerwind.net
|
||||
```
|
||||
|
||||
#### Helm Version Bumps
|
||||
### Running End to End Tests
|
||||
|
||||
In general we ask you not to bump the version in your PR, the maintainers in general manage the publishing of a new chart.
|
||||
> **Notes for Ubuntu 20.04+ users**
|
||||
>
|
||||
> If you're using Ubuntu 20.04 or greater, you might have installed `docker` with `snap`.
|
||||
>
|
||||
> If you want to stick with `snap`-provided `docker`, do not forget to set `TMPDIR` to somewhere under `$HOME`.
|
||||
> Otherwise `kind load docker-image` fails while running `docker save`.
|
||||
> See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap for more information.
|
||||
|
||||
To test your local changes against both PAT and App based authentication please run the `acceptance` make target with the authentication configuration details provided:
|
||||
|
||||
```shell
|
||||
# This sets `VERSION` envvar to some appropriate value
|
||||
. hack/make-env.sh
|
||||
|
||||
DOCKER_USER=*** \
|
||||
GITHUB_TOKEN=*** \
|
||||
APP_ID=*** \
|
||||
PRIVATE_KEY_FILE_PATH=path/to/pem/file \
|
||||
INSTALLATION_ID=*** \
|
||||
make acceptance
|
||||
```
|
||||
|
||||
#### Rerunning a failed test
|
||||
|
||||
When one of the tests run by `make acceptance` fails, you'll probably want to rerun only the failed one.
|
||||
|
||||
It can be done by `make acceptance/run` and by setting the combination of `ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm|kubectl` and `ACCEPTANCE_TEST_SECRET_TYPE=token|app` values that failed (note, you just need to set the corresponding authentication configuration in this circumstance)
|
||||
|
||||
In the example below, we rerun the test for the combination `ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token` only:
|
||||
|
||||
```shell
|
||||
DOCKER_USER=*** \
|
||||
GITHUB_TOKEN=*** \
|
||||
ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm \
|
||||
ACCEPTANCE_TEST_SECRET_TYPE=token \
|
||||
make acceptance/run
|
||||
```
|
||||
|
||||
#### Testing in a non-kind cluster
|
||||
|
||||
If you prefer to test in a non-kind cluster, you can instead run:
|
||||
|
||||
```shell
|
||||
KUBECONFIG=path/to/kubeconfig \
|
||||
DOCKER_USER=*** \
|
||||
GITHUB_TOKEN=*** \
|
||||
APP_ID=*** \
|
||||
PRIVATE_KEY_FILE_PATH=path/to/pem/file \
|
||||
INSTALLATION_ID=*** \
|
||||
ACCEPTANCE_TEST_SECRET_TYPE=token \
|
||||
make docker-build acceptance/setup \
|
||||
acceptance/deploy \
|
||||
acceptance/tests
|
||||
```
|
||||
|
||||
### Code conventions
|
||||
|
||||
Before shipping your PR, please check the following items to make sure CI passes (a combined command sketch follows the list).
|
||||
|
||||
- Run `go mod tidy` if you made changes to dependencies.
|
||||
- Format the code using `gofmt`
|
||||
- Run the `golangci-lint` tool locally.
|
||||
- We recommend you use `make lint` to run the tool using a Docker container matching the CI version.
|
||||
|
||||
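A combined sketch of those checks, assuming Docker is available for the lint container:

```shell
# Minimal pre-PR hygiene pass.
go mod tidy   # only needed if you touched dependencies
make fmt      # runs go fmt against the code
make lint     # golangci-lint in a container matching the CI version
```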
### Opening the Pull Request
|
||||
|
||||
Open the pull request and reference the related issue number in the description.
|
||||
|
||||
## Helm Version Changes
|
||||
|
||||
In general we ask you not to bump the version in your PR.
|
||||
The maintainers will manage releases and publishing new charts.
|
||||
|
||||
## Testing Controller Built from a Pull Request
|
||||
|
||||
We always appreciate your help in testing open pull requests by deploying custom builds of actions-runner-controller onto your own environment, so that we are extra sure we didn't break anything.
|
||||
|
||||
It is especially true when the pull request is about GitHub Enterprise, both GHEC and GHES, as [maintainers don't have GitHub Enterprise environments for testing](docs/about-arc.md#github-enterprise-support).
|
||||
|
||||
The process would look like the below:
|
||||
|
||||
- Clone this repository locally
|
||||
- Checkout the branch. If you use the `gh` command, run `gh pr checkout $PR_NUMBER`
|
||||
- Run `NAME=$DOCKER_USER/actions-runner-controller VERSION=canary make docker-build docker-push` for a custom container image build
|
||||
- Update your actions-runner-controller's controller-manager deployment to use the new image, `$DOCKER_USER/actions-runner-controller:canary`
|
||||
|
||||
Please also note that you need to replace `$DOCKER_USER` with your own DockerHub account name; the full sequence is sketched below.
|
||||
|
||||
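A condensed sketch of the steps above; `$PR_NUMBER` and `$DOCKER_USER` are yours, and the namespace, deployment, and container names in the last command are assumptions that depend on how you installed ARC:

```shell
# Build and roll out a canary image for an open pull request.
gh pr checkout $PR_NUMBER
NAME=$DOCKER_USER/actions-runner-controller VERSION=canary make docker-build docker-push
# Point your controller-manager deployment at the canary image (names are assumptions).
kubectl -n actions-runner-system set image deployment/actions-runner-controller \
  manager=$DOCKER_USER/actions-runner-controller:canary
```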
## Release process
|
||||
|
||||
Only the maintainers can release a new version of actions-runner-controller, publish a new version of the helm charts, and runner images.
|
||||
|
||||
All release workflows have been moved to [actions-runner-controller/releases](https://github.com/actions-runner-controller/releases) since the packages are owned by the former organization.
|
||||
|
||||
17 Dockerfile
@@ -1,11 +1,10 @@
|
||||
# Build the manager binary
|
||||
FROM --platform=$BUILDPLATFORM golang:1.18.3 as builder
|
||||
FROM --platform=$BUILDPLATFORM golang:1.19.4 as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
|
||||
# Make it runnable on a distroless image/without libc
|
||||
ENV CGO_ENABLED=0
|
||||
|
||||
# Copy the Go Modules manifests
|
||||
COPY go.mod go.sum ./
|
||||
|
||||
@@ -25,20 +24,22 @@ RUN go mod download
|
||||
# With the above command,
|
||||
# TARGETOS can be "linux", TARGETARCH can be "amd64", "arm64", and "arm", TARGETVARIANT can be "v7".
|
||||
|
||||
ARG TARGETPLATFORM TARGETOS TARGETARCH TARGETVARIANT
|
||||
ARG TARGETPLATFORM TARGETOS TARGETARCH TARGETVARIANT VERSION=dev
|
||||
|
||||
# We intentionally avoid `--mount=type=cache,mode=0777,target=/go/pkg/mod` in the `go mod download` and the `go build` runs
|
||||
# to avoid https://github.com/moby/buildkit/issues/2334
|
||||
# We can use docker layer cache so the build is fast enough anyway
|
||||
# We also use per-platform GOCACHE for the same reason.
|
||||
env GOCACHE /build/${TARGETPLATFORM}/root/.cache/go-build
|
||||
ENV GOCACHE /build/${TARGETPLATFORM}/root/.cache/go-build
|
||||
|
||||
# Build
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,mode=0777,target=${GOCACHE} \
|
||||
export GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOARM=${TARGETVARIANT#v} && \
|
||||
go build -o /out/manager main.go && \
|
||||
go build -o /out/github-webhook-server ./cmd/githubwebhookserver
|
||||
go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}'" -o /out/manager main.go && \
|
||||
go build -trimpath -ldflags="-s -w" -o /out/github-runnerscaleset-listener ./cmd/githubrunnerscalesetlistener && \
|
||||
go build -trimpath -ldflags="-s -w" -o /out/github-webhook-server ./cmd/githubwebhookserver && \
|
||||
go build -trimpath -ldflags="-s -w" -o /out/actions-metrics-server ./cmd/actionsmetricsserver
|
||||
|
||||
# Use distroless as minimal base image to package the manager binary
|
||||
# Refer to https://github.com/GoogleContainerTools/distroless for more details
|
||||
@@ -48,7 +49,9 @@ WORKDIR /
|
||||
|
||||
COPY --from=builder /out/manager .
|
||||
COPY --from=builder /out/github-webhook-server .
|
||||
COPY --from=builder /out/actions-metrics-server .
|
||||
COPY --from=builder /out/github-runnerscaleset-listener .
|
||||
|
||||
USER nonroot:nonroot
|
||||
USER 65532:65532
|
||||
|
||||
ENTRYPOINT ["/manager"]
|
||||
|
||||
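The `VERSION` build-arg is what stamps `build.Version` into the manager binary, so a local build that should report a meaningful version needs to pass it through. A minimal sketch, assuming a working buildx builder and your own image name:

```shell
# Build the manager image with a version stamped into the binary.
DOCKER_IMAGE_NAME=$DOCKER_USER/actions-runner-controller VERSION=canary make docker-buildx
# Or call buildx directly, mirroring the Makefile's invocation:
docker buildx build --build-arg VERSION=canary \
  -t $DOCKER_USER/actions-runner-controller:canary -f Dockerfile .
```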
150 Makefile
@@ -1,11 +1,11 @@
|
||||
ifdef DOCKER_USER
|
||||
NAME ?= ${DOCKER_USER}/actions-runner-controller
|
||||
DOCKER_IMAGE_NAME ?= ${DOCKER_USER}/actions-runner-controller
|
||||
else
|
||||
NAME ?= summerwind/actions-runner-controller
|
||||
DOCKER_IMAGE_NAME ?= summerwind/actions-runner-controller
|
||||
endif
|
||||
DOCKER_USER ?= $(shell echo ${NAME} | cut -d / -f1)
|
||||
VERSION ?= latest
|
||||
RUNNER_VERSION ?= 2.294.0
|
||||
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
|
||||
VERSION ?= dev
|
||||
RUNNER_VERSION ?= 2.301.1
|
||||
TARGETPLATFORM ?= $(shell arch)
|
||||
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
|
||||
RUNNER_TAG ?= ${VERSION}
|
||||
@@ -19,6 +19,7 @@ KUBECONTEXT ?= kind-acceptance
|
||||
CLUSTER ?= acceptance
|
||||
CERT_MANAGER_VERSION ?= v1.1.1
|
||||
KUBE_RBAC_PROXY_VERSION ?= v0.11.0
|
||||
SHELLCHECK_VERSION ?= 0.8.0
|
||||
|
||||
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
|
||||
CRD_OPTIONS ?= "crd:generateEmbeddedObjectMeta=true"
|
||||
@@ -31,6 +32,20 @@ GOBIN=$(shell go env GOBIN)
|
||||
endif
|
||||
|
||||
TEST_ASSETS=$(PWD)/test-assets
|
||||
TOOLS_PATH=$(PWD)/.tools
|
||||
|
||||
OS_NAME := $(shell uname -s | tr A-Z a-z)
|
||||
|
||||
# The etcd packages that coreos maintain use different extensions for each *nix OS on their github release page.
|
||||
# ETCD_EXTENSION: the storage format file extension listed on the release page.
|
||||
# EXTRACT_COMMAND: the appropriate CLI command for extracting this file format.
|
||||
ifeq ($(OS_NAME), darwin)
|
||||
ETCD_EXTENSION:=zip
|
||||
EXTRACT_COMMAND:=unzip
|
||||
else
|
||||
ETCD_EXTENSION:=tar.gz
|
||||
EXTRACT_COMMAND:=tar -xzf
|
||||
endif
|
||||
|
||||
# default list of platforms for which multiarch image is built
|
||||
ifeq (${PLATFORMS}, )
|
||||
@@ -51,12 +66,15 @@ endif
|
||||
|
||||
all: manager
|
||||
|
||||
lint:
|
||||
docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.49.0 golangci-lint run
|
||||
|
||||
GO_TEST_ARGS ?= -short
|
||||
|
||||
# Run tests
|
||||
test: generate fmt vet manifests
|
||||
test: generate fmt vet manifests shellcheck
|
||||
go test $(GO_TEST_ARGS) ./... -coverprofile cover.out
|
||||
go test -fuzz=Fuzz -fuzztime=10s -run=Fuzz* ./controllers
|
||||
go test -fuzz=Fuzz -fuzztime=10s -run=Fuzz* ./controllers/actions.summerwind.net
|
||||
|
||||
test-with-deps: kube-apiserver etcd kubectl
|
||||
# See https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#pkg-constants
|
||||
@@ -68,6 +86,7 @@ test-with-deps: kube-apiserver etcd kubectl
|
||||
# Build manager binary
|
||||
manager: generate fmt vet
|
||||
go build -o bin/manager main.go
|
||||
go build -o bin/github-runnerscaleset-listener ./cmd/githubrunnerscalesetlistener
|
||||
|
||||
# Run against the configured Kubernetes cluster in ~/.kube/config
|
||||
run: generate fmt vet manifests
|
||||
@@ -83,7 +102,7 @@ uninstall: manifests
|
||||
|
||||
# Deploy controller in the configured Kubernetes cluster in ~/.kube/config
|
||||
deploy: manifests
|
||||
cd config/manager && kustomize edit set image controller=${NAME}:${VERSION}
|
||||
cd config/manager && kustomize edit set image controller=${DOCKER_IMAGE_NAME}:${VERSION}
|
||||
kustomize build config/default | kubectl apply -f -
|
||||
|
||||
# Generate manifests e.g. CRD, RBAC etc.
|
||||
@@ -92,11 +111,72 @@ manifests: manifests-gen-crds chart-crds
|
||||
manifests-gen-crds: controller-gen yq
|
||||
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
|
||||
for YAMLFILE in config/crd/bases/actions*.yaml; do \
|
||||
$(YQ) write --inplace "$$YAMLFILE" spec.preserveUnknownFields false; \
|
||||
$(YQ) '.spec.preserveUnknownFields = false' --inplace "$$YAMLFILE" ; \
|
||||
done
|
||||
#runners
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.sidecarContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.dockerdContainerResources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml
|
||||
#runnerreplicasets
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.sidecarContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.dockerdContainerResources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml
|
||||
#runnerdeployments
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.sidecarContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.dockerdContainerResources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml
|
||||
#runnersets
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.volumeClaimTemplates.items.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml
|
||||
#autoscalingrunnersets
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml
|
||||
#ephemeralrunnersets
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml
|
||||
# ephemeralrunners
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml
|
||||
$(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml
|
||||
|
||||
chart-crds:
|
||||
cp config/crd/bases/*.yaml charts/actions-runner-controller/crds/
|
||||
cp config/crd/bases/actions.github.com_autoscalingrunnersets.yaml charts/actions-runner-controller-2/crds/
|
||||
cp config/crd/bases/actions.github.com_autoscalinglisteners.yaml charts/actions-runner-controller-2/crds/
|
||||
cp config/crd/bases/actions.github.com_ephemeralrunnersets.yaml charts/actions-runner-controller-2/crds/
|
||||
cp config/crd/bases/actions.github.com_ephemeralrunners.yaml charts/actions-runner-controller-2/crds/
|
||||
rm charts/actions-runner-controller/crds/actions.github.com_autoscalingrunnersets.yaml
|
||||
rm charts/actions-runner-controller/crds/actions.github.com_autoscalinglisteners.yaml
|
||||
rm charts/actions-runner-controller/crds/actions.github.com_ephemeralrunnersets.yaml
|
||||
rm charts/actions-runner-controller/crds/actions.github.com_ephemeralrunners.yaml
|
||||
|
||||
# Run go fmt against code
|
||||
fmt:
|
||||
@@ -110,6 +190,10 @@ vet:
|
||||
generate: controller-gen
|
||||
$(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths="./..."
|
||||
|
||||
# Run shellcheck on runner scripts
|
||||
shellcheck: shellcheck-install
|
||||
$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh
|
||||
|
||||
docker-buildx:
|
||||
export DOCKER_CLI_EXPERIMENTAL=enabled ;\
|
||||
export DOCKER_BUILDKIT=1
|
||||
@@ -119,18 +203,19 @@ docker-buildx:
|
||||
docker buildx build --platform ${PLATFORMS} \
|
||||
--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
|
||||
--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
|
||||
-t "${NAME}:${VERSION}" \
|
||||
--build-arg VERSION=${VERSION} \
|
||||
-t "${DOCKER_IMAGE_NAME}:${VERSION}" \
|
||||
-f Dockerfile \
|
||||
. ${PUSH_ARG}
|
||||
|
||||
# Push the docker image
|
||||
docker-push:
|
||||
docker push ${NAME}:${VERSION}
|
||||
docker push ${DOCKER_IMAGE_NAME}:${VERSION}
|
||||
docker push ${RUNNER_NAME}:${RUNNER_TAG}
|
||||
|
||||
# Generate the release manifest file
|
||||
release: manifests
|
||||
cd config/manager && kustomize edit set image controller=${NAME}:${VERSION}
|
||||
cd config/manager && kustomize edit set image controller=${DOCKER_IMAGE_NAME}:${VERSION}
|
||||
mkdir -p release
|
||||
kustomize build config/default > release/actions-runner-controller.yaml
|
||||
|
||||
@@ -154,7 +239,7 @@ acceptance/kind:
|
||||
# Otherwise `load docker-image` fail while running `docker save`.
|
||||
# See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap
|
||||
acceptance/load:
|
||||
kind load docker-image ${NAME}:${VERSION} --name ${CLUSTER}
|
||||
kind load docker-image ${DOCKER_IMAGE_NAME}:${VERSION} --name ${CLUSTER}
|
||||
kind load docker-image quay.io/brancz/kube-rbac-proxy:$(KUBE_RBAC_PROXY_VERSION) --name ${CLUSTER}
|
||||
kind load docker-image ${RUNNER_NAME}:${RUNNER_TAG} --name ${CLUSTER}
|
||||
kind load docker-image docker:dind --name ${CLUSTER}
|
||||
@@ -184,7 +269,7 @@ acceptance/teardown:
|
||||
kind delete cluster --name ${CLUSTER}
|
||||
|
||||
acceptance/deploy:
|
||||
NAME=${NAME} DOCKER_USER=${DOCKER_USER} VERSION=${VERSION} RUNNER_NAME=${RUNNER_NAME} RUNNER_TAG=${RUNNER_TAG} TEST_REPO=${TEST_REPO} \
|
||||
DOCKER_IMAGE_NAME=${DOCKER_IMAGE_NAME} DOCKER_USER=${DOCKER_USER} VERSION=${VERSION} RUNNER_NAME=${RUNNER_NAME} RUNNER_TAG=${RUNNER_TAG} TEST_REPO=${TEST_REPO} \
|
||||
TEST_ORG=${TEST_ORG} TEST_ORG_REPO=${TEST_ORG_REPO} SYNC_PERIOD=${SYNC_PERIOD} \
|
||||
USE_RUNNERSET=${USE_RUNNERSET} \
|
||||
TEST_EPHEMERAL=${TEST_EPHEMERAL} \
|
||||
@@ -193,8 +278,8 @@ acceptance/deploy:
|
||||
acceptance/tests:
|
||||
acceptance/checks.sh
|
||||
|
||||
acceptance/runner/entrypoint:
|
||||
cd test/entrypoint/ && bash test.sh
|
||||
acceptance/runner/startup:
|
||||
cd test/startup/ && bash test.sh
|
||||
|
||||
# We use -count=1 instead of `go clean -testcache`
|
||||
# See https://terratest.gruntwork.io/docs/testing-best-practices/avoid-test-caching/
|
||||
@@ -242,13 +327,30 @@ ifeq (, $(wildcard $(GOBIN)/yq))
|
||||
YQ_TMP_DIR=$$(mktemp -d) ;\
|
||||
cd $$YQ_TMP_DIR ;\
|
||||
go mod init tmp ;\
|
||||
go install github.com/mikefarah/yq/v3@3.4.0 ;\
|
||||
go install github.com/mikefarah/yq/v4@v4.25.3 ;\
|
||||
rm -rf $$YQ_TMP_DIR ;\
|
||||
}
|
||||
endif
|
||||
YQ=$(GOBIN)/yq
|
||||
|
||||
OS_NAME := $(shell uname -s | tr A-Z a-z)
|
||||
# find or download shellcheck
|
||||
# download shellcheck if necessary
|
||||
shellcheck-install:
|
||||
ifeq (, $(wildcard $(TOOLS_PATH)/shellcheck))
|
||||
echo "Downloading shellcheck"
|
||||
@{ \
|
||||
set -e ;\
|
||||
SHELLCHECK_TMP_DIR=$$(mktemp -d) ;\
|
||||
cd $$SHELLCHECK_TMP_DIR ;\
|
||||
curl -LO https://github.com/koalaman/shellcheck/releases/download/v$(SHELLCHECK_VERSION)/shellcheck-v$(SHELLCHECK_VERSION).$(OS_NAME).x86_64.tar.xz ;\
|
||||
tar Jxvf shellcheck-v$(SHELLCHECK_VERSION).$(OS_NAME).x86_64.tar.xz ;\
|
||||
cd $(CURDIR) ;\
|
||||
mkdir -p $(TOOLS_PATH) ;\
|
||||
mv $$SHELLCHECK_TMP_DIR/shellcheck-v$(SHELLCHECK_VERSION)/shellcheck $(TOOLS_PATH)/ ;\
|
||||
rm -rf $$SHELLCHECK_TMP_DIR ;\
|
||||
}
|
||||
endif
|
||||
SHELLCHECK=$(TOOLS_PATH)/shellcheck
|
||||
|
||||
# find or download etcd
|
||||
etcd:
|
||||
@@ -258,12 +360,10 @@ ifeq (, $(wildcard $(TEST_ASSETS)/etcd))
|
||||
set -xe ;\
|
||||
INSTALL_TMP_DIR=$$(mktemp -d) ;\
|
||||
cd $$INSTALL_TMP_DIR ;\
|
||||
wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
|
||||
wget https://github.com/coreos/etcd/releases/download/v3.4.22/etcd-v3.4.22-$(OS_NAME)-amd64.$(ETCD_EXTENSION);\
|
||||
mkdir -p $(TEST_ASSETS) ;\
|
||||
tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
|
||||
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
|
||||
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
|
||||
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
|
||||
$(EXTRACT_COMMAND) etcd-v3.4.22-$(OS_NAME)-amd64.$(ETCD_EXTENSION) ;\
|
||||
mv etcd-v3.4.22-$(OS_NAME)-amd64/etcd $(TEST_ASSETS)/etcd ;\
|
||||
rm -rf $$INSTALL_TMP_DIR ;\
|
||||
}
|
||||
ETCD_BIN=$(TEST_ASSETS)/etcd
|
||||
@@ -285,9 +385,7 @@ ifeq (, $(wildcard $(TEST_ASSETS)/kube-apiserver))
|
||||
wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
|
||||
mkdir -p $(TEST_ASSETS) ;\
|
||||
tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
|
||||
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
|
||||
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
|
||||
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
|
||||
rm -rf $$INSTALL_TMP_DIR ;\
|
||||
}
|
||||
KUBE_APISERVER_BIN=$(TEST_ASSETS)/kube-apiserver
|
||||
@@ -309,8 +407,6 @@ ifeq (, $(wildcard $(TEST_ASSETS)/kubectl))
|
||||
wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
|
||||
mkdir -p $(TEST_ASSETS) ;\
|
||||
tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
|
||||
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
|
||||
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
|
||||
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
|
||||
rm -rf $$INSTALL_TMP_DIR ;\
|
||||
}
|
||||
|
||||
14 PROJECT
@@ -1,5 +1,5 @@
|
||||
domain: summerwind.dev
|
||||
repo: github.com/actions-runner-controller/actions-runner-controller
|
||||
repo: github.com/actions/actions-runner-controller
|
||||
resources:
|
||||
- group: actions
|
||||
kind: Runner
|
||||
@@ -10,4 +10,16 @@ resources:
|
||||
- group: actions
|
||||
kind: RunnerDeployment
|
||||
version: v1alpha1
|
||||
- group: actions
|
||||
kind: AutoscalingRunnerSet
|
||||
version: v1alpha1
|
||||
- group: actions
|
||||
kind: EphemeralRunnerSet
|
||||
version: v1alpha1
|
||||
- group: actions
|
||||
kind: EphemeralRunner
|
||||
version: v1alpha1
|
||||
- group: actions
|
||||
kind: AutoscalingListener
|
||||
version: v1alpha1
|
||||
version: "2"
|
||||
|
||||
35 SECURITY.md
@@ -1,22 +1,31 @@
|
||||
# Security Policy
|
||||
Thanks for helping make GitHub safe for everyone.
|
||||
|
||||
## Sponsoring the project
|
||||
## Security
|
||||
|
||||
This project is maintained by a small team of two and therefore lacks the resource to provide security fixes in a timely manner.
|
||||
GitHub takes the security of our software products and services seriously, including all of the open source code repositories managed through our GitHub organizations, such as [GitHub](https://github.com/GitHub).
|
||||
|
||||
If you have important business(es) that relies on this project, please consider sponsoring the project so that the maintainer(s) can commit to providing such service.
|
||||
Even though [open source repositories are outside of the scope of our bug bounty program](https://bounty.github.com/index.html#scope) and therefore not eligible for bounty rewards, we will ensure that your finding gets passed along to the appropriate maintainers for remediation.
|
||||
|
||||
Please refer to https://github.com/sponsors/actions-runner-controller for available tiers.
|
||||
## Reporting Security Issues
|
||||
|
||||
## Supported Versions
|
||||
If you believe you have found a security vulnerability in any GitHub-owned repository, please report it to us through coordinated disclosure.
|
||||
|
||||
| Version | Supported |
|
||||
| ------- | ------------------ |
|
||||
| 0.23.0 | :white_check_mark: |
|
||||
| < 0.23.0| :x: |
|
||||
**Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.**
|
||||
|
||||
## Reporting a Vulnerability
|
||||
Instead, please send an email to opensource-security[@]github.com.
|
||||
|
||||
To report a security issue, please email ykuoka+arcsecurity(at)gmail.com with a description of the issue, the steps you took to create the issue, affected versions, and, if known, mitigations for the issue.
|
||||
Please include as much of the information listed below as you can to help us better understand and resolve the issue:
|
||||
|
||||
A maintainer will try to respond within 5 working days. If the issue is confirmed as a vulnerability, a Security Advisory will be opened. This project tries to follow a 90 day disclosure timeline.
|
||||
* The type of issue (e.g., buffer overflow, SQL injection, or cross-site scripting)
|
||||
* Full paths of source file(s) related to the manifestation of the issue
|
||||
* The location of the affected source code (tag/branch/commit or direct URL)
|
||||
* Any special configuration required to reproduce the issue
|
||||
* Step-by-step instructions to reproduce the issue
|
||||
* Proof-of-concept or exploit code (if possible)
|
||||
* Impact of the issue, including how an attacker might exploit the issue
|
||||
|
||||
This information will help us triage your report more quickly.
|
||||
|
||||
## Policy
|
||||
|
||||
See [GitHub's Safe Harbor Policy](https://docs.github.com/en/github/site-policy/github-bug-bounty-program-legal-safe-harbor#1-safe-harbor-terms)
|
||||
@@ -11,13 +11,14 @@
|
||||
* [Runner coming up before network available](#runner-coming-up-before-network-available)
|
||||
* [Outgoing network action hangs indefinitely](#outgoing-network-action-hangs-indefinitely)
|
||||
* [Unable to scale to zero with TotalNumberOfQueuedAndInProgressWorkflowRuns](#unable-to-scale-to-zero-with-totalnumberofqueuedandinprogressworkflowruns)
|
||||
* [Slow / failure to boot dind sidecar (default runner)](#slow--failure-to-boot-dind-sidecar-default-runner)
|
||||
|
||||
## Tools
|
||||
|
||||
A list of tools which are helpful for troubleshooting
|
||||
|
||||
* https://github.com/rewanthtammana/kubectl-fields Kubernetes resources hierarchy parsing tool
|
||||
* https://github.com/stern/stern Multi pod and container log tailing for Kubernetes
|
||||
* [Kubernetes resources hierarchy parsing tool `kubectl-fields`](https://github.com/rewanthtammana/kubectl-fields)
|
||||
* [Multi pod and container log tailing for Kubernetes `stern`](https://github.com/stern/stern)
|
||||
|
||||
## Installation
|
||||
|
||||
@@ -29,7 +30,7 @@ Troubeshooting runbooks that relate to ARC installation problems
|
||||
|
||||
This issue can come up for various reasons like leftovers from previous installations or not being able to access the K8s service's clusterIP associated with the admission webhook server (of ARC).
|
||||
|
||||
```
|
||||
```text
|
||||
Internal error occurred: failed calling webhook "mutate.runnerdeployment.actions.summerwind.dev":
|
||||
Post "https://actions-runner-controller-webhook.actions-runner-system.svc:443/mutate-actions-summerwind-dev-v1alpha1-runnerdeployment?timeout=10s": context deadline exceeded
|
||||
```
|
||||
@@ -38,21 +39,23 @@ Post "https://actions-runner-controller-webhook.actions-runner-system.svc:443/mu
|
||||
|
||||
First we will try the common solution of checking webhook leftovers from previous installations:
|
||||
|
||||
1. ```bash
|
||||
kubectl get validatingwebhookconfiguration -A
|
||||
kubectl get mutatingwebhookconfiguration -A
|
||||
```
|
||||
2. If you see any webhooks related to actions-runner-controller, delete them:
|
||||
1. ```bash
|
||||
kubectl get validatingwebhookconfiguration -A
|
||||
kubectl get mutatingwebhookconfiguration -A
|
||||
```
|
||||
|
||||
2. If you see any webhooks related to actions-runner-controller, delete them:
|
||||
|
||||
```bash
|
||||
kubectl delete mutatingwebhookconfiguration actions-runner-controller-mutating-webhook-configuration
|
||||
kubectl delete validatingwebhookconfiguration actions-runner-controller-validating-webhook-configuration
|
||||
```
|
||||
|
||||
If that didn't work then probably your K8s control-plane is somehow unable to access the K8s service's clusterIP associated with the admission webhook server:
|
||||
|
||||
1. You're running apiserver as a binary and you didn't make service cluster IPs available to the host network.
|
||||
2. You're running the apiserver in a pod, but your pod network (i.e. the CNI plugin installation and configuration) is broken, so pods on the control-plane nodes (like kube-apiserver) can't reach ARC's admission webhook server pod(s), which likely run on data-plane nodes.
|
||||
|
||||
|
||||
Another reason could be GKE's firewall settings: you may run into the following errors when trying to deploy runners on a private GKE cluster:
|
||||
|
||||
To fix this, you may either:
|
||||
@@ -62,7 +65,7 @@ To fix this, you may either:
|
||||
|
||||
```sh
|
||||
# With helm, you'd set `webhookPort` to the port number of your choice
|
||||
# See https://github.com/actions-runner-controller/actions-runner-controller/pull/1410/files for more information
|
||||
# See https://github.com/actions/actions-runner-controller/pull/1410/files for more information
|
||||
helm upgrade --install --namespace actions-runner-system --create-namespace \
|
||||
--wait actions-runner-controller actions-runner-controller/actions-runner-controller \
|
||||
--set webhookPort=10250
|
||||
@@ -92,7 +95,7 @@ To fix this, you may either:
|
||||
**Problem**
|
||||
|
||||
```json
|
||||
2020-11-12T22:17:30.693Z ERROR controller-runtime.controller Reconciler error
|
||||
2020-11-12T22:17:30.693Z ERROR controller-runtime.controller Reconciler error
|
||||
{
|
||||
"controller": "runner",
|
||||
"request": "actions-runner-system/runner-deployment-dk7q8-dk5c9",
|
||||
@@ -103,6 +106,7 @@ To fix this, you may either:
|
||||
**Solution**
|
||||
|
||||
Your base64'ed PAT token has a newline at the end; it needs to be created without a trailing `\n`, either:
|
||||
|
||||
* `echo -n $TOKEN | base64`
|
||||
* Create the secret as described in the docs using the shell and the documented flags (see the sketch below)
|
||||
|
||||
@@ -110,7 +114,7 @@ Your base64'ed PAT token has a new line at the end, it needs to be created witho
|
||||
|
||||
**Problem**
|
||||
|
||||
```
|
||||
```text
|
||||
Error: UPGRADE FAILED: failed to create resource: Internal error occurred: failed calling webhook "webhook.cert-manager.io": failed to call webhook: Post "https://cert-manager-webhook.cert-manager.svc:443/mutate?timeout=10s": x509: certificate signed by unknown authority
|
||||
```
|
||||
|
||||
@@ -118,7 +122,7 @@ Apparently, it's failing while `helm` is creating one of resources defined in th
|
||||
|
||||
You'd try to tail logs from the `cert-manager-cainjector` and see it's failing with an error like:
|
||||
|
||||
```
|
||||
```text
|
||||
$ kubectl -n cert-manager logs cert-manager-cainjector-7cdbb9c945-g6bt4
|
||||
I0703 03:31:55.159339 1 start.go:91] "starting" version="v1.1.1" revision="3ac7418070e22c87fae4b22603a6b952f797ae96"
|
||||
I0703 03:31:55.615061 1 leaderelection.go:243] attempting to acquire leader lease kube-system/cert-manager-cainjector-leader-election...
|
||||
@@ -136,7 +140,7 @@ Your cluster is based on a new enough Kubernetes of version 1.22 or greater whic
|
||||
|
||||
In many cases, downgrading Kubernetes is not an option. So, just upgrade `cert-manager` to a more recent version that does support the specific Kubernetes version you're using.
|
||||
|
||||
See https://cert-manager.io/docs/installation/supported-releases/ for the list of available cert-manager versions.
|
||||
See <https://cert-manager.io/docs/installation/supported-releases/> for the list of available cert-manager versions.
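If cert-manager was installed via Helm, the upgrade could look roughly like this (a sketch; substitute a chart version from the supported-releases page that matches your Kubernetes version):

```bash
helm repo add jetstack https://charts.jetstack.io
helm repo update
# Replace v1.x.y with a release that supports your Kubernetes version
helm upgrade --install cert-manager jetstack/cert-manager \
  --namespace cert-manager --create-namespace \
  --version v1.x.y \
  --set installCRDs=true
```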
|
||||
|
||||
## Operations
|
||||
|
||||
@@ -152,7 +156,7 @@ Sometimes either the runner kind (`kubectl get runners`) or its underlying pod
|
||||
|
||||
Remove the finalizer from the relevant runner kind or pod
|
||||
|
||||
```
|
||||
```text
|
||||
# Get all kind runners and remove the finalizer
|
||||
$ kubectl get runners --no-headers | awk {'print $1'} | xargs kubectl patch runner --type merge -p '{"metadata":{"finalizers":null}}'
|
||||
|
||||
@@ -167,7 +171,7 @@ are in a namespace not shared with anything else_
|
||||
|
||||
**Problem**
|
||||
|
||||
ARC isn't involved in jobs actually getting allocated to a runner. ARC is responsible for orchestrating runners and the runner lifecycle. Why some people see large delays in job allocation is not clear however it has been https://github.com/actions-runner-controller/actions-runner-controller/issues/1387#issuecomment-1122593984 that this is caused from the self-update process somehow.
|
||||
ARC isn't involved in jobs actually getting allocated to a runner. ARC is responsible for orchestrating runners and the runner lifecycle. Why some people see large delays in job allocation is not clear; however, it has been confirmed in https://github.com/actions/actions-runner-controller/issues/1387#issuecomment-1122593984 that this is somehow caused by the self-update process.
|
||||
|
||||
**Solution**
|
||||
|
||||
@@ -194,7 +198,7 @@ spec:
|
||||
If you're running your action runners on a service mesh like Istio, you might
|
||||
have problems with runner configuration accompanied by logs like:
|
||||
|
||||
```
|
||||
```text
|
||||
....
|
||||
runner Starting Runner listener with startup type: service
|
||||
runner Started listener process
|
||||
@@ -209,11 +213,11 @@ configuration script tries to communicate with the network.
|
||||
|
||||
More broadly, there are many other circumstances where the runner pod coming up first can cause issues.
|
||||
|
||||
**Solution**<br />
|
||||
**Solution**
|
||||
|
||||
> Added originally to help users with older istio instances.
|
||||
> Newer Istio instances can use Istio's `holdApplicationUntilProxyStarts` attribute ([istio/istio#11130](https://github.com/istio/istio/issues/11130)) to avoid having to delay starting up the runner.
|
||||
> Please read the discussion in [#592](https://github.com/actions-runner-controller/actions-runner-controller/pull/592) for more information.
|
||||
> Please read the discussion in [#592](https://github.com/actions/actions-runner-controller/pull/592) for more information.
|
||||
|
||||
You can add a delay to the runner's entrypoint script by setting the `STARTUP_DELAY_IN_SECONDS` environment variable for the runner pod. This will cause the script to sleep for the configured number of seconds; this works with any runner kind.
|
||||
|
||||
@@ -231,7 +235,7 @@ spec:
|
||||
value: "5"
|
||||
```
|
||||
|
||||
## Outgoing network action hangs indefinitely
|
||||
### Outgoing network action hangs indefinitely
|
||||
|
||||
**Problem**
|
||||
|
||||
@@ -256,10 +260,30 @@ spec:
|
||||
env: []
|
||||
```
|
||||
|
||||
There may be more places you need to tweak for MTU.
|
||||
Please consult issues like #651 for more information.
|
||||
If the issue still persists, you can set the `ARC_DOCKER_MTU_PROPAGATION` environment variable to propagate the host MTU to networks created
|
||||
by the GitHub Runner. For instance:
|
||||
|
||||
## Unable to scale to zero with TotalNumberOfQueuedAndInProgressWorkflowRuns
|
||||
```yaml
|
||||
apiVersion: actions.summerwind.dev/v1alpha1
|
||||
kind: RunnerDeployment
|
||||
metadata:
|
||||
name: github-runner
|
||||
namespace: github-system
|
||||
spec:
|
||||
replicas: 6
|
||||
template:
|
||||
spec:
|
||||
dockerMTU: 1400
|
||||
repository: $username/$repo
|
||||
env:
|
||||
- name: ARC_DOCKER_MTU_PROPAGATION
|
||||
value: "true"
|
||||
```
|
||||
|
||||
You can read the discussion regarding this issue in
|
||||
[#1406](https://github.com/actions/actions-runner-controller/issues/1046).
|
||||
|
||||
### Unable to scale to zero with TotalNumberOfQueuedAndInProgressWorkflowRuns
|
||||
|
||||
**Problem**
|
||||
|
||||
@@ -267,6 +291,16 @@ HRA doesn't scale the RunnerDeployment to zero, even though you did configure HR
|
||||
|
||||
**Solution**
|
||||
|
||||
You very likely have some dangling workflow jobs stuck in `queued` or `in_progress` as seen in [#1057](https://github.com/actions-runner-controller/actions-runner-controller/issues/1057#issuecomment-1133439061).
|
||||
You very likely have some dangling workflow jobs stuck in `queued` or `in_progress` as seen in [#1057](https://github.com/actions/actions-runner-controller/issues/1057#issuecomment-1133439061).
|
||||
|
||||
Manually call [the "list workflow runs" API](https://docs.github.com/en/rest/actions/workflow-runs#list-workflow-runs-for-a-repository), and [remove the dangling workflow job(s)](https://docs.github.com/en/rest/actions/workflow-runs#delete-a-workflow-run).
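With the `gh` CLI, that could look like the following sketch (owner, repository and run id are placeholders; double-check what you delete):

```bash
# List runs that are still queued or in progress
gh api "repos/$OWNER/$REPO/actions/runs?status=queued" --jq '.workflow_runs[] | {id, name, status}'
gh api "repos/$OWNER/$REPO/actions/runs?status=in_progress" --jq '.workflow_runs[] | {id, name, status}'

# Delete a dangling run by its id
gh api -X DELETE "repos/$OWNER/$REPO/actions/runs/$RUN_ID"
```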
|
||||
|
||||
### Slow / failure to boot dind sidecar (default runner)
|
||||
|
||||
**Problem**
|
||||
|
||||
If you notice that it takes several minutes for the dind sidecar container to be created, or that it exits with an error just after being created, you may be experiencing a disk performance issue. You might also see the message `failed to reserve container name` when scaling up multiple runners at once. When you SSH into the Kubernetes node that the problematic pods were scheduled on, you can use tools like `atop`, `htop` or `iotop` to check IO usage and the percentage of CPU time spent in iowait. If disk usage is high (80-100%) and iowait takes a significant chunk of your CPU time (normally it should not be higher than 10%), performance is being bottlenecked by a slow disk.
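For example, on the affected node (a sketch; `iostat` is part of the sysstat package and may need to be installed, and `iotop` usually requires root):

```bash
# Per-device utilization and overall iowait, sampled five times at 1s intervals
iostat -x 1 5

# Which processes are doing the IO right now
sudo iotop -o -b -n 3
```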
|
||||
|
||||
**Solution**
|
||||
|
||||
The solution is to switch to faster storage. If you are experiencing this issue, you are probably using HDD storage; switching to SSD storage fixed the problem in my case. Most cloud providers offer a list of storage options, so just pick something faster than your current disk; for on-prem clusters you will need to invest in some SSDs.
|
||||
|
||||
@@ -88,6 +88,9 @@ data:
|
||||
no-autoupdate: true
|
||||
ingress:
|
||||
# The first rule proxies traffic to the httpbin sample Service defined in app.yaml
|
||||
- hostname: ${TUNNEL_HOSTNAME}
|
||||
service: http://actions-runner-controller-actions-metrics-server.actions-runner-system:80
|
||||
path: /metrics$
|
||||
- hostname: ${TUNNEL_HOSTNAME}
|
||||
service: http://actions-runner-controller-github-webhook-server.actions-runner-system:80
|
||||
# This rule matches any traffic which didn't match a previous rule, and responds with HTTP 404.
|
||||
|
||||
@@ -35,14 +35,45 @@ else
|
||||
echo 'Skipped deploying secret "github-webhook-server". Set WEBHOOK_GITHUB_TOKEN to deploy.' 1>&2
|
||||
fi
|
||||
|
||||
if [ -n "${WEBHOOK_GITHUB_TOKEN}" ]; then
|
||||
kubectl -n actions-runner-system delete secret \
|
||||
actions-metrics-server || :
|
||||
kubectl -n actions-runner-system create secret generic \
|
||||
actions-metrics-server \
|
||||
--from-literal=github_token=${WEBHOOK_GITHUB_TOKEN:?WEBHOOK_GITHUB_TOKEN must not be empty}
|
||||
else
|
||||
echo 'Skipped deploying secret "actions-metrics-server". Set WEBHOOK_GITHUB_TOKEN to deploy.' 1>&2
|
||||
fi
|
||||
|
||||
tool=${ACCEPTANCE_TEST_DEPLOYMENT_TOOL}
|
||||
|
||||
TEST_ID=${TEST_ID:-default}
|
||||
|
||||
if [ "${tool}" == "helm" ]; then
|
||||
set -v
|
||||
|
||||
CHART=${CHART:-charts/actions-runner-controller}
|
||||
|
||||
flags=()
|
||||
if [ "${IMAGE_PULL_SECRET}" != "" ]; then
|
||||
flags+=( --set imagePullSecrets[0].name=${IMAGE_PULL_SECRET})
|
||||
flags+=( --set image.actionsRunnerImagePullSecrets[0].name=${IMAGE_PULL_SECRET})
|
||||
flags+=( --set githubWebhookServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET})
|
||||
flags+=( --set actionsMetricsServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET})
|
||||
fi
|
||||
if [ "${CHART_VERSION}" != "" ]; then
|
||||
flags+=( --version ${CHART_VERSION})
|
||||
fi
|
||||
if [ "${LOG_FORMAT}" != "" ]; then
|
||||
flags+=( --set logFormat=${LOG_FORMAT})
|
||||
flags+=( --set githubWebhookServer.logFormat=${LOG_FORMAT})
|
||||
flags+=( --set actionsMetricsServer.logFormat=${LOG_FORMAT})
|
||||
fi
|
||||
|
||||
set -vx
|
||||
|
||||
helm upgrade --install actions-runner-controller \
|
||||
charts/actions-runner-controller \
|
||||
${CHART} \
|
||||
-n actions-runner-system \
|
||||
--create-namespace \
|
||||
--set syncPeriod=${SYNC_PERIOD} \
|
||||
@@ -51,9 +82,8 @@ if [ "${tool}" == "helm" ]; then
|
||||
--set image.tag=${VERSION} \
|
||||
--set podAnnotations.test-id=${TEST_ID} \
|
||||
--set githubWebhookServer.podAnnotations.test-id=${TEST_ID} \
|
||||
--set imagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
|
||||
--set image.actionsRunnerImagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
|
||||
--set githubWebhookServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
|
||||
--set actionsMetricsServer.podAnnotations.test-id=${TEST_ID} \
|
||||
${flags[@]} --set image.imagePullPolicy=${IMAGE_PULL_POLICY} \
|
||||
-f ${VALUES_FILE}
|
||||
set +v
|
||||
# To prevent `CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: metadata.annotations: Too long: must have at most 262144 bytes`
|
||||
|
||||
@@ -6,6 +6,8 @@ OP=${OP:-apply}
|
||||
|
||||
RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}
|
||||
|
||||
cat acceptance/testdata/kubernetes_container_mode.envsubst.yaml | NAMESPACE=${RUNNER_NAMESPACE} envsubst | kubectl apply -f -
|
||||
|
||||
if [ -n "${TEST_REPO}" ]; then
|
||||
if [ "${USE_RUNNERSET}" != "false" ]; then
|
||||
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerset envsubst | kubectl ${OP} -f -
|
||||
|
||||
86
acceptance/testdata/kubernetes_container_mode.envsubst.yaml
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
# USAGE:
|
||||
# cat acceptance/testdata/kubernetes_container_mode.envsubst.yaml | NAMESPACE=default envsubst | kubectl apply -f -
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: k8s-mode-runner
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "list", "create", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods/exec"]
|
||||
verbs: ["get", "create"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods/log"]
|
||||
verbs: ["get", "list", "watch",]
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs"]
|
||||
verbs: ["get", "list", "create", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list", "create", "delete"]
|
||||
# Needed to report test success by creating a ConfigMap from within a workflow job step
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["create", "delete"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: runner-status-updater
|
||||
rules:
|
||||
- apiGroups: ["actions.summerwind.dev"]
|
||||
resources: ["runners/status"]
|
||||
verbs: ["get", "update", "patch"]
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: ${RUNNER_SERVICE_ACCOUNT_NAME}
|
||||
namespace: ${NAMESPACE}
|
||||
---
|
||||
# To verify it's working, try:
|
||||
# kubectl auth can-i --as system:serviceaccount:default:runner get pod
|
||||
# If incomplete, workflows and jobs would fail with an error message like:
|
||||
# Error: Error: The Service account needs the following permissions [{"group":"","verbs":["get","list","create","delete"],"resource":"pods","subresource":""},{"group":"","verbs":["get","create"],"resource":"pods","subresource":"exec"},{"group":"","verbs":["get","list","watch"],"resource":"pods","subresource":"log"},{"group":"batch","verbs":["get","list","create","delete"],"resource":"jobs","subresource":""},{"group":"","verbs":["create","delete","get","list"],"resource":"secrets","subresource":""}] on the pod resource in the 'default' namespace. Please contact your self hosted runner administrator.
|
||||
# Error: Process completed with exit code 1.
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
# This role binding allows "jane" to read pods in the "default" namespace.
|
||||
# You need to already have a Role named "pod-reader" in that namespace.
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: runner-k8s-mode-runner
|
||||
namespace: ${NAMESPACE}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ${RUNNER_SERVICE_ACCOUNT_NAME}
|
||||
namespace: ${NAMESPACE}
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: k8s-mode-runner
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: runner-runner-status-updater
|
||||
namespace: ${NAMESPACE}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ${RUNNER_SERVICE_ACCOUNT_NAME}
|
||||
namespace: ${NAMESPACE}
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: runner-status-updater
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: org-runnerdeploy-runner-work-dir
|
||||
labels:
|
||||
content: org-runnerdeploy-runner-work-dir
|
||||
provisioner: rancher.io/local-path
|
||||
reclaimPolicy: Delete
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
78
acceptance/testdata/runnerdeploy.envsubst.yaml
vendored
@@ -1,3 +1,23 @@
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: ${NAME}-runner-work-dir
|
||||
labels:
|
||||
content: ${NAME}-runner-work-dir
|
||||
provisioner: rancher.io/local-path
|
||||
reclaimPolicy: Delete
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: ${NAME}-rootless-dind-work-dir
|
||||
labels:
|
||||
content: ${NAME}-rootless-dind-work-dir
|
||||
provisioner: rancher.io/local-path
|
||||
reclaimPolicy: Delete
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
---
|
||||
apiVersion: actions.summerwind.dev/v1alpha1
|
||||
kind: RunnerDeployment
|
||||
metadata:
|
||||
@@ -39,10 +59,68 @@ spec:
|
||||
labels:
|
||||
- "${RUNNER_LABEL}"
|
||||
|
||||
serviceAccountName: ${RUNNER_SERVICE_ACCOUNT_NAME}
|
||||
terminationGracePeriodSeconds: ${RUNNER_TERMINATION_GRACE_PERIOD_SECONDS}
|
||||
|
||||
env:
|
||||
- name: RUNNER_GRACEFUL_STOP_TIMEOUT
|
||||
value: "${RUNNER_GRACEFUL_STOP_TIMEOUT}"
|
||||
- name: ROLLING_UPDATE_PHASE
|
||||
value: "${ROLLING_UPDATE_PHASE}"
|
||||
- name: ARC_DOCKER_MTU_PROPAGATION
|
||||
value: "true"
|
||||
# https://github.com/docker/docs/issues/8663
|
||||
- name: DOCKER_DEFAULT_ADDRESS_POOL_BASE
|
||||
value: "172.17.0.0/12"
|
||||
- name: DOCKER_DEFAULT_ADDRESS_POOL_SIZE
|
||||
value: "24"
|
||||
- name: WAIT_FOR_DOCKER_SECONDS
|
||||
value: "3"
|
||||
|
||||
dockerMTU: 1400
|
||||
dockerEnv:
|
||||
- name: RUNNER_GRACEFUL_STOP_TIMEOUT
|
||||
value: "${RUNNER_GRACEFUL_STOP_TIMEOUT}"
|
||||
|
||||
# Fix the following no space left errors with rootless-dind runners that can happen while running buildx build:
|
||||
# ------
|
||||
# > [4/5] RUN go mod download:
|
||||
# ------
|
||||
# ERROR: failed to solve: failed to prepare yxsw8lv9hqnuafzlfta244l0z: mkdir /home/runner/.local/share/docker/vfs/dir/yxsw8lv9hqnuafzlfta244l0z/usr/local/go/src/cmd/compile/internal/types2/testdata: no space left on device
|
||||
# Error: Process completed with exit code 1.
|
||||
#
|
||||
volumeMounts:
|
||||
- name: rootless-dind-work-dir
|
||||
# Omit the /share/docker part of the /home/runner/.local/share/docker as
|
||||
# that part is created by dockerd.
|
||||
mountPath: /home/runner/.local
|
||||
readOnly: false
|
||||
volumes:
|
||||
- name: rootless-dind-work-dir
|
||||
ephemeral:
|
||||
volumeClaimTemplate:
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
storageClassName: "${NAME}-rootless-dind-work-dir"
|
||||
resources:
|
||||
requests:
|
||||
storage: 3Gi
|
||||
|
||||
#
|
||||
# Non-standard working directory
|
||||
#
|
||||
# workDir: "/"
|
||||
|
||||
# # Uncomment the below to enable the kubernetes container mode
|
||||
# # See https://github.com/actions/actions-runner-controller#runner-with-k8s-jobs
|
||||
containerMode: ${RUNNER_CONTAINER_MODE}
|
||||
workVolumeClaimTemplate:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
storageClassName: "${NAME}-runner-work-dir"
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
---
|
||||
apiVersion: actions.summerwind.dev/v1alpha1
|
||||
kind: HorizontalRunnerAutoscaler
|
||||
|
||||
75
acceptance/testdata/runnerset.envsubst.yaml
vendored
@@ -54,6 +54,16 @@ provisioner: rancher.io/local-path
|
||||
reclaimPolicy: Retain
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: ${NAME}-rootless-dind-work-dir
|
||||
labels:
|
||||
content: ${NAME}-rootless-dind-work-dir
|
||||
provisioner: rancher.io/local-path
|
||||
reclaimPolicy: Delete
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
---
|
||||
apiVersion: actions.summerwind.dev/v1alpha1
|
||||
kind: RunnerSet
|
||||
metadata:
|
||||
@@ -112,14 +122,27 @@ spec:
|
||||
labels:
|
||||
app: ${NAME}
|
||||
spec:
|
||||
serviceAccountName: ${RUNNER_SERVICE_ACCOUNT_NAME}
|
||||
terminationGracePeriodSeconds: ${RUNNER_TERMINATION_GRACE_PERIOD_SECONDS}
|
||||
containers:
|
||||
# # Uncomment only when non-dind-runner / you're using docker sidecar
|
||||
# - name: docker
|
||||
# # Image is required for the dind sidecar definition within RunnerSet spec
|
||||
# image: "docker:dind"
|
||||
# env:
|
||||
# - name: RUNNER_GRACEFUL_STOP_TIMEOUT
|
||||
# value: "${RUNNER_GRACEFUL_STOP_TIMEOUT}"
|
||||
- name: runner
|
||||
imagePullPolicy: IfNotPresent
|
||||
env:
|
||||
- name: RUNNER_GRACEFUL_STOP_TIMEOUT
|
||||
value: "${RUNNER_GRACEFUL_STOP_TIMEOUT}"
|
||||
- name: RUNNER_FEATURE_FLAG_EPHEMERAL
|
||||
value: "${RUNNER_FEATURE_FLAG_EPHEMERAL}"
|
||||
- name: GOMODCACHE
|
||||
value: "/home/runner/.cache/go-mod"
|
||||
- name: ROLLING_UPDATE_PHASE
|
||||
value: "${ROLLING_UPDATE_PHASE}"
|
||||
# PV-backed runner work dir
|
||||
volumeMounts:
|
||||
# Comment out the ephemeral work volume if you're going to test the kubernetes container mode
|
||||
@@ -152,20 +175,28 @@ spec:
|
||||
# https://github.com/actions/setup-go/blob/56a61c9834b4a4950dbbf4740af0b8a98c73b768/src/installer.ts#L144
|
||||
mountPath: "/opt/hostedtoolcache"
|
||||
# Valid only when dockerdWithinRunnerContainer=false
|
||||
- name: docker
|
||||
# PV-backed runner work dir
|
||||
volumeMounts:
|
||||
- name: work
|
||||
mountPath: /runner/_work
|
||||
# Cache docker image layers, in case dockerdWithinRunnerContainer=false
|
||||
- name: var-lib-docker
|
||||
mountPath: /var/lib/docker
|
||||
# image: mumoshu/actions-runner-dind:dev
|
||||
# - name: docker
|
||||
# # PV-backed runner work dir
|
||||
# volumeMounts:
|
||||
# - name: work
|
||||
# mountPath: /runner/_work
|
||||
# # Cache docker image layers, in case dockerdWithinRunnerContainer=false
|
||||
# - name: var-lib-docker
|
||||
# mountPath: /var/lib/docker
|
||||
# # image: mumoshu/actions-runner-dind:dev
|
||||
|
||||
# For buildx cache
|
||||
- name: cache
|
||||
mountPath: "/home/runner/.cache"
|
||||
# Comment out the ephemeral work volume if you're going to test the kubernetes container mode
|
||||
# # For buildx cache
|
||||
# - name: cache
|
||||
# mountPath: "/home/runner/.cache"
|
||||
|
||||
# For fixing no space left error on rootless dind runner
|
||||
- name: rootless-dind-work-dir
|
||||
# Omit the /share/docker part of the /home/runner/.local/share/docker as
|
||||
# that part is created by dockerd.
|
||||
mountPath: /home/runner/.local
|
||||
readOnly: false
|
||||
|
||||
# Comment out the ephemeral work volume if you're going to test the kubernetes container mode
|
||||
# volumes:
|
||||
# - name: work
|
||||
# ephemeral:
|
||||
@@ -177,6 +208,24 @@ spec:
|
||||
# resources:
|
||||
# requests:
|
||||
# storage: 10Gi
|
||||
|
||||
# Fix the following no space left errors with rootless-dind runners that can happen while running buildx build:
|
||||
# ------
|
||||
# > [4/5] RUN go mod download:
|
||||
# ------
|
||||
# ERROR: failed to solve: failed to prepare yxsw8lv9hqnuafzlfta244l0z: mkdir /home/runner/.local/share/docker/vfs/dir/yxsw8lv9hqnuafzlfta244l0z/usr/local/go/src/cmd/compile/internal/types2/testdata: no space left on device
|
||||
# Error: Process completed with exit code 1.
|
||||
#
|
||||
volumes:
|
||||
- name: rootless-dind-work-dir
|
||||
ephemeral:
|
||||
volumeClaimTemplate:
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
storageClassName: "${NAME}-rootless-dind-work-dir"
|
||||
resources:
|
||||
requests:
|
||||
storage: 3Gi
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: vol1
|
||||
|
||||
@@ -1,13 +1,18 @@
|
||||
# Set actions-runner-controller settings for testing
|
||||
logLevel: "-4"
|
||||
imagePullSecrets:
|
||||
- name:
|
||||
imagePullSecrets: []
|
||||
image:
|
||||
actionsRunnerImagePullSecrets:
|
||||
- name:
|
||||
# This needs to be an empty array rather than a single-item array with empty name.
|
||||
# Otherwise you end up with the following error on helm-upgrade:
|
||||
# Error: UPGRADE FAILED: failed to create patch: map: map[] does not contain declared merge key: name && failed to create patch: map: map[] does not contain declared merge key: name
|
||||
actionsRunnerImagePullSecrets: []
|
||||
runner:
|
||||
statusUpdateHook:
|
||||
enabled: true
|
||||
rbac:
|
||||
allowGrantingKubernetesContainerModePermissions: true
|
||||
githubWebhookServer:
|
||||
imagePullSecrets:
|
||||
- name:
|
||||
imagePullSecrets: []
|
||||
logLevel: "-4"
|
||||
enabled: true
|
||||
labels: {}
|
||||
@@ -28,3 +33,23 @@ githubWebhookServer:
|
||||
protocol: TCP
|
||||
name: http
|
||||
nodePort: 31000
|
||||
actionsMetricsServer:
|
||||
imagePullSecrets: []
|
||||
logLevel: "-4"
|
||||
enabled: true
|
||||
labels: {}
|
||||
replicaCount: 1
|
||||
secret:
|
||||
enabled: true
|
||||
# create: true
|
||||
name: "actions-metrics-server"
|
||||
### GitHub Webhook Configuration
|
||||
#github_webhook_secret_token: ""
|
||||
service:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
name: http
|
||||
nodePort: 31001
|
||||
|
||||
90
adrs/2022-10-17-runner-image.md
Normal file
@@ -0,0 +1,90 @@
|
||||
# ADR 0001: Produce the runner image for the scaleset client
|
||||
**Date**: 2022-10-17
|
||||
|
||||
**Status**: Done
|
||||
|
||||
# Context
|
||||
|
||||
Users can bring their own runner images; the contract we have is:
|
||||
- It must have a runner binary under /actions-runner (/actions-runner/run.sh exists)
|
||||
- The WORKDIR is set to /actions-runner
|
||||
- If the user inside the container is root, the ENV RUNNER_ALLOW_RUNASROOT should be set to 1
|
||||
|
||||
The existing ARC runner images will not work with the new ARC mode out of the box, for the following reasons:
|
||||
|
||||
- The current runner image requires caller to pass runner configure info, ex: URL and Config Token
|
||||
- The current runner image has the runner binary under /runner
|
||||
- The current runner image requires a special entrypoint script in order to work around some volume mount limitation for setting up DinD.
|
||||
|
||||
However, since we expose the raw runner Pod spec to our users, advanced users can modify the Helm values.yaml to make everything line up properly.
|
||||
|
||||
# Guiding Principles
|
||||
|
||||
- The image build is separated into two stages.
|
||||
|
||||
## The first stage (build)
|
||||
- Reuses the same base image, so it is faster to build.
|
||||
- Installs utilities needed to download assets (runner and runner-container-hooks).
|
||||
- Downloads the runner and stores it into `/actions-runner` directory.
|
||||
- Downloads the runner-container-hooks and stores it into `/actions-runner/k8s` directory.
|
||||
- You can use build arguments to control the runner version, the target platform and runner container hooks version.
|
||||
|
||||
Preview:
|
||||
|
||||
```Dockerfile
|
||||
FROM mcr.microsoft.com/dotnet/runtime-deps:6.0 as build
|
||||
|
||||
ARG RUNNER_ARCH="x64"
|
||||
ARG RUNNER_VERSION=2.298.2
|
||||
ARG RUNNER_CONTAINER_HOOKS_VERSION=0.1.3
|
||||
|
||||
RUN apt update -y && apt install curl unzip -y
|
||||
|
||||
WORKDIR /actions-runner
|
||||
RUN curl -f -L -o runner.tar.gz https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-${RUNNER_ARCH}-${RUNNER_VERSION}.tar.gz \
|
||||
&& tar xzf ./runner.tar.gz \
|
||||
&& rm runner.tar.gz
|
||||
|
||||
RUN curl -f -L -o runner-container-hooks.zip https://github.com/actions/runner-container-hooks/releases/download/v${RUNNER_CONTAINER_HOOKS_VERSION}/actions-runner-hooks-k8s-${RUNNER_CONTAINER_HOOKS_VERSION}.zip \
|
||||
&& unzip ./runner-container-hooks.zip -d ./k8s \
|
||||
&& rm runner-container-hooks.zip
|
||||
```
|
||||
|
||||
## The main image
|
||||
- Copies assets from the build stage to `/actions-runner`
|
||||
- Does not provide an entrypoint. The entrypoint should be set within the container definition.
|
||||
|
||||
Preview:
|
||||
|
||||
```Dockerfile
|
||||
FROM mcr.microsoft.com/dotnet/runtime-deps:6.0
|
||||
|
||||
WORKDIR /actions-runner
|
||||
COPY --from=build /actions-runner .
|
||||
```
|
||||
|
||||
## Example of pod spec with the init container copying assets
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: <name>
|
||||
spec:
|
||||
containers:
|
||||
- name: runner
|
||||
image: <image>
|
||||
command: ["/runner/run.sh"]
|
||||
volumeMounts:
|
||||
- name: runner
|
||||
mountPath: /runner
|
||||
initContainers:
|
||||
- name: setup
|
||||
image: <image>
|
||||
command: ["sh", "-c", "cp -r /actions-runner/* /runner/"]
|
||||
volumeMounts:
|
||||
- name: runner
|
||||
mountPath: /runner
|
||||
volumes:
|
||||
- name: runner
|
||||
emptyDir: {}
|
||||
```
|
||||
54
adrs/2022-10-27-runnerscaleset-lifetime.md
Normal file
@@ -0,0 +1,54 @@
|
||||
# ADR 0003: Lifetime of RunnerScaleSet on Service
|
||||
|
||||
**Date**: 2022-10-27
|
||||
|
||||
**Status**: Done
|
||||
|
||||
## Context
|
||||
|
||||
We have created the RunnerScaleSet object and APIs around it on the GitHub Actions service for better support of any self-hosted runner auto-scale solution, like [actions-runner-controller](https://github.com/actions-runner-controller/actions-runner-controller).
|
||||
|
||||
The `RunnerScaleSet` object will represent a set of homogeneous self-hosted runners to the Actions service job routing system.
|
||||
|
||||
A `RunnerScaleSet` client (ARC) needs to communicate with the Actions service via HTTP long-poll in a certain protocol to get a workflow job successfully landed on one of its homogeneous self-hosted runners.
|
||||
|
||||
In this ADR, I want to discuss the following within the context of actions-runner-controller's new scaling mode:
|
||||
- Who creates a RunnerScaleSet on the service, and how?
|
||||
- Who deletes a RunnerScaleSet on the service, and how?
|
||||
- What will happen to all the runners and jobs when the deletion happens?
|
||||
|
||||
## RunnerScaleSet creation
|
||||
|
||||
- `AutoScalingRunnerSet` custom resource controller will create the `RunnerScaleSet` object in the Actions service on any `AutoScalingRunnerSet` resource deployment.
|
||||
- The creation is via REST API on Actions service `POST _apis/runtime/runnerscalesets`
|
||||
- The creation needs to use the runner registration token (admin).
|
||||
- `RunnerScaleSet.Name` == `AutoScalingRunnerSet.metadata.Name`
|
||||
- The created `RunnerScaleSet` will only have 1 label and it's the `RunnerScaleSet`'s name
|
||||
- `AutoScalingRunnerSet` controller will store the `RunnerScaleSet.Id` as an annotation on the k8s resource for future lookup.
|
||||
|
||||
## RunnerScaleSet modification
|
||||
|
||||
- When the user patches an existing `AutoScalingRunnerSet`'s RunnerScaleSet-related property, e.g. `runnerGroupName` or `runnerWorkDir`, the controller needs to make an HTTP PATCH call to the `_apis/runtime/runnerscalesets/2` endpoint in order to update the object on the service.
|
||||
- We will put the deployed `AutoScalingRunnerSet` resource in an error state when the user tries to patch the resource with a different `githubConfigUrl`
|
||||
> Basically, you can't move a deployed `AutoScalingRunnerSet` across GitHub entities (repoA->repoB, repoA->OrgC, etc.).
|
||||
> We evaluated blocking the change up front instead of erroring at runtime, but decided not to go down this route because it would force us to re-introduce admission webhooks (which require cert-manager).
|
||||
|
||||
## RunnerScaleSet deletion
|
||||
|
||||
- `AutoScalingRunnerSet` custom resource controller will delete the `RunnerScaleSet` object in the Actions service on any `AutoScalingRunnerSet` resource deletion.
|
||||
> `AutoScalingRunnerSet` deletion will contain several steps:
|
||||
> - Stop the listener app so that no new jobs come in and no further scaling up/down happens.
|
||||
> - Request scale down to 0
|
||||
> - Force stop all runners
|
||||
> - Wait for the scale down to 0
|
||||
> - Delete the `RunnerScaleSet` object from service via REST API
|
||||
- The deletion is via REST API on Actions service `DELETE _apis/runtime/runnerscalesets/1`
|
||||
- The deletion needs to use the runner registration token (admin).
|
||||
|
||||
The user's `RunnerScaleSet` will be deleted from the service by `DormantRunnerScaleSetCleanupJob` if the particular `AutoScalingRunnerSet` has not connected to the service for the past 7 days. We have a similar rule for self-hosted runners.
|
||||
|
||||
## Jobs and Runners on deletion
|
||||
|
||||
- `RunnerScaleSet` deletion will be blocked if there is any job assigned to a runner within the `RunnerScaleSet`, which has to scale down to 0 before deletion.
|
||||
- Any job that has been assigned to the `RunnerScaleSet` but hasn't been assigned to a runner within the `RunnerScaleSet` will get thrown back to the queue and wait for assignment again.
|
||||
- Any offline runners within the `RunnerScaleSet` will be deleted from the service side.
|
||||
51
adrs/2022-11-04-crd-api-group-name.md
Normal file
@@ -0,0 +1,51 @@
|
||||
# ADR 0004: Technical detail about actions-runner-controller repository transfer
|
||||
**Date**: 2022-11-04
|
||||
|
||||
**Status**: Done
|
||||
|
||||
# Context
|
||||
|
||||
As part of ARC Private Beta: Repository Migration & Open Sourcing Process, we have decided to transfer the current [actions-runner-controller repository](https://github.com/actions-runner-controller/actions-runner-controller) into the [Actions org](https://github.com/actions).
|
||||
|
||||
**Goals:**
|
||||
- A clear signal that GitHub will start taking over ARC and provide support.
|
||||
- Since we are going to deprecate the existing auto-scale mode in ARC at some point, we want to have a clear separation between the legacy mode (not supported) and the new mode (supported).
|
||||
- Avoid disrupting users as much as we can, existing ARC users will not notice any difference after the repository transfer, they can keep upgrading to the newer version of ARC and keep using the legacy mode.
|
||||
|
||||
**Challenges**
|
||||
- The original creator's name (`summerwind`) is all over the place, including some critical parts of ARC:
|
||||
- The k8s user resource API's full name is `actions.summerwind.dev/v1alpha1/RunnerDeployment`; renaming it to `actions.github.com` is a breaking change and would force users to rebuild their entire k8s cluster.
|
||||
- All docker images around ARC (controller + default runner) are published to [dockerhub/summerwind](https://hub.docker.com/u/summerwind)
|
||||
- The helm chart for ARC is currently hosted on [GitHub pages](https://actions-runner-controller.github.io/actions-runner-controller) for https://github.com/actions-runner-controller/actions-runner-controller; moving the repository means we will break users who install ARC via the helm chart
|
||||
|
||||
|
||||
# Decisions
|
||||
|
||||
## APIs group names for k8s custom resources, `actions.summerwind` or `actions.github`
|
||||
|
||||
- We will not rename any existing ARC resource API names after moving the repository under the Actions org (keep `summerwind` for old stuff).
|
||||
- For any new resource API we are going to add, those will be named properly under GitHub, ex: `actions.github.com/v1alpha1/AutoScalingRunnerSet`
|
||||
|
||||
Benefits:
|
||||
- A clear separation from existing ARC:
|
||||
- Easy for the support engineer to triage incoming tickets and figure out whether we need to support the use case from the user
|
||||
- We won't break existing users when they upgrade to a newer version of ARC after the repository transfer
|
||||
|
||||
Based on the spike done by `@nikola-jokic`, we have confidence that we can host multiple resources with different API names under the same repository, and the published ARC controller can handle both resources properly.
|
||||
|
||||
## ARC Docker images
|
||||
|
||||
We will not start using the GitHub container registry for hosting ARC images (controller + runner images) right after the repository transfer.
|
||||
|
||||
But over time, we will start using GHCR for hosting those images along with our deprecation story.
|
||||
|
||||
## Helm chart
|
||||
|
||||
We will recreate the https://github.com/actions-runner-controller/actions-runner-controller repository after the repository transfer.
|
||||
|
||||
The recreated repository will only contain the helm chart assets which keep powering the https://actions-runner-controller.github.io/actions-runner-controller for users to install ARC via Helm.
|
||||
|
||||
Long term, we will switch to hosting the helm chart on GHCR (OCI) instead of using GitHub Pages.
|
||||
|
||||
This will require a one-time change to our users by running
|
||||
`helm repo remove actions-runner-controller` and `helm repo add actions-runner-controller oci://ghcr.io/actions`
|
||||
80
adrs/2022-12-05-adding-labels-k8s-resources.md
Normal file
@@ -0,0 +1,80 @@
|
||||
# ADR 0007: Adding labels to our resources
|
||||
|
||||
**Date**: 2022-12-05
|
||||
|
||||
**Status**: Done
|
||||
|
||||
## Context
|
||||
|
||||
Users need to provide us with logs so that we can help support and troubleshoot their issues. We need a way for our users to filter and retrieve the logs we need.
|
||||
|
||||
## Proposal
|
||||
|
||||
A good start would be a catch-all label to get all logs that are
|
||||
ARC-related: one of the [recommended labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/)
|
||||
is `app.kubernetes.io/part-of` and we can set that for all ARC components
|
||||
to be `actions-runner-controller`.
|
||||
|
||||
Assuming standard logging that would allow us to get all ARC logs by running
|
||||
|
||||
```bash
|
||||
kubectl logs -l 'app.kubernetes.io/part-of=actions-runner-controller'
|
||||
```
|
||||
which would be very useful for development to begin with.
|
||||
|
||||
The proposal is to add these sets of labels to the pods ARC creates:
|
||||
|
||||
#### controller-manager
|
||||
Labels to be set by the Helm chart:
|
||||
```yaml
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/part-of: actions-runner-controller
|
||||
app.kubernetes.io/component: controller-manager
|
||||
app.kubernetes.io/version: "x.x.x"
|
||||
```
|
||||
|
||||
#### Listener
|
||||
Labels to be set by controller at creation:
|
||||
```yaml
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/part-of: actions-runner-controller
|
||||
app.kubernetes.io/component: runner-scale-set-listener
|
||||
app.kubernetes.io/version: "x.x.x"
|
||||
actions.github.com/scale-set-name: scale-set-name # this corresponds to metadata.name as set for AutoscalingRunnerSet
|
||||
|
||||
# the following labels are to be extracted by the config URL
|
||||
actions.github.com/enterprise: enterprise
|
||||
actions.github.com/organization: organization
|
||||
actions.github.com/repository: repository
|
||||
```
|
||||
|
||||
#### Runner
|
||||
Labels to be set by controller at creation:
|
||||
```yaml
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/part-of: actions-runner-controller
|
||||
app.kubernetes.io/component: runner
|
||||
app.kubernetes.io/version: "x.x.x"
|
||||
actions.github.com/scale-set-name: scale-set-name # this corresponds to metadata.name as set for AutoscalingRunnerSet
|
||||
actions.github.com/runner-name: runner-name
|
||||
actions.github.com/runner-group-name: runner-group-name
|
||||
|
||||
# the following labels are to be extracted by the config URL
|
||||
actions.github.com/enterprise: enterprise
|
||||
actions.github.com/organization: organization
|
||||
actions.github.com/repository: repository
|
||||
```
|
||||
|
||||
This would allow us to ask users:
|
||||
|
||||
> Can you please send us the logs coming from pods labelled 'app.kubernetes.io/part-of=actions-runner-controller'?
|
||||
|
||||
Or for example if they're having problems specifically with runners:
|
||||
|
||||
> Can you please send us the logs coming from pods labelled 'app.kubernetes.io/component=runner'?
|
||||
|
||||
This way users don't have to understand ARC moving parts but we still have a
|
||||
way to target them specifically if we need to.
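With these labels in place, targeted log collection becomes a one-liner. A sketch (the scale set name is illustrative):

```bash
# Everything ARC-related, across all namespaces
kubectl get pods -A -l app.kubernetes.io/part-of=actions-runner-controller

# Logs from runner pods only (current namespace)
kubectl logs -l app.kubernetes.io/component=runner --prefix

# Runner pods belonging to a single scale set
kubectl get pods -l actions.github.com/scale-set-name=my-scale-set
```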
|
||||
88
adrs/2022-12-27-pick-the-right-runner-to-scale-down.md
Normal file
@@ -0,0 +1,88 @@
|
||||
# ADR 0008: Pick the right runner to scale down
|
||||
**Date**: 2022-12-27
|
||||
|
||||
**Status**: Done
|
||||
|
||||
## Context
|
||||
|
||||
- A custom resource `EphemeralRunnerSet` manages a set of custom resources of kind `EphemeralRunner`
|
||||
- The `EphemeralRunnerSet` has `Replicas` in its `Spec`, and the responsibility of the `EphemeralRunnerSet_controller` is to reconcile a given `EphemeralRunnerSet` to have
|
||||
the same number of `EphemeralRunners` as `Spec.Replicas` defines.
|
||||
- This means the `EphemeralRunnerSet_controller` will scale up the `EphemeralRunnerSet` by creating more `EphemeralRunners` when `Spec.Replicas` is higher than
|
||||
the current number of `EphemeralRunners`.
|
||||
- This also means the `EphemeralRunnerSet_controller` will scale down the `EphemeralRunnerSet` by finding existing `EphemeralRunners` to delete when
|
||||
`Spec.Replicas` is less than the current number of `EphemeralRunners`.
|
||||
|
||||
This ADR is about how we can find the right existing `EphemeralRunner` to delete when we need to scale down.
|
||||
|
||||
|
||||
## Current approach
|
||||
|
||||
1. `EphemeralRunnerSet_controller` figures out how many `EphemeralRunners` it needs to delete, e.g. scaling down from 10 to 2 means deleting 8 `EphemeralRunners`.
|
||||
|
||||
2. `EphemeralRunnerSet_controller` finds all `EphemeralRunners` that are in the `Running` or `Pending` phase.
|
||||
> `Pending` means the `EphemeralRunner` is probably still being created and its runner has not yet been configured with the Actions service.
|
||||
> `Running` means the `EphemeralRunner` is created and its runner has probably been configured with the Actions service; the runner may be sitting there idle,
|
||||
> or maybe actively running a workflow job. We don't have a clear answer for it from the ARC side. (Actions service knows it for sure)
|
||||
|
||||
3. `EphemeralRunnerSet_controller` makes an HTTP DELETE request to the Actions service for each `EphemeralRunner` from the previous step and asks the Actions service to delete the runner via its `RunnerId`.
|
||||
(The `RunnerId` is generated after the runner registers with the Actions service, and is stored in `EphemeralRunner.Status.RunnerId`.)
|
||||
> - The HTTP DELETE request looks like the following:
|
||||
> `DELETE https://pipelines.actions.githubusercontent.com/WoxlUxJHrKEzIp4Nz3YmrmLlZBonrmj9xCJ1lrzcJ9ZsD1Tnw7/_apis/distributedtask/pools/0/agents/1024`
|
||||
> The Actions service will return 2 types of responses:
|
||||
> 1. 204 (No Content): The runner with Id 1024 has been successfully removed from the service or the runner with Id 1024 doesn't exist.
|
||||
> 2. 400 (Bad Request) with JSON body that contains an error message like `JobStillRunningException`: The service can't remove this runner at this point since it has been
|
||||
> assigned to a job request; the client won't be able to remove the runner until the runner finishes its currently assigned job request.
|
||||
|
||||
4. `EphemeralRunnerSet_controller` will ignore any deletion error from runners that are still running a job, and keep retrying deletion until the number of `204` responses equals the number of
|
||||
`EphemeralRunners` it needs to delete.
|
||||
|
||||
## The problem with the current approach
|
||||
|
||||
In a busy `AutoScalingRunnerSet`, scale-ups and scale-downs may happen all the time as jobs are queued and finished.
|
||||
|
||||
We end up making way too many HTTP requests to the Actions service asking it to try to delete a certain runner, relying on the exception from the service to figure out what to do next.
|
||||
|
||||
The runner deletion request is not cheap for the service; for synchronization, the `JobStillRunningException` is raised from the DB call that handles the request.
|
||||
|
||||
So we are wasting resources on both the Actions service (extra load to the database) and the actions-runner-controller (useless outgoing HTTP requests).
|
||||
|
||||
In the test ARC that I deployed to Azure, the ARC controller tried to delete RunnerId 12408 for `bbq-beets/ting-test` a total of 35 times within 10 minutes.
|
||||
|
||||
## Root cause
|
||||
|
||||
The `EphemeralRunnerSet_controller` doesn't know whether a given `EphemeralRunner` is actually running a workflow job or not
|
||||
(it only knows the runner is configured with the service), so it can't filter such `EphemeralRunners` out.
|
||||
|
||||
## Additional context
|
||||
|
||||
Legacy ARC's custom resource allows the runner image to leverage the RunnerJobHook feature to update the status of the runner custom resource in K8s (marking the runner as running workflow run Id XXX).
|
||||
|
||||
This brings good value to users, as it provides insight into which runner is running which job across the cluster, and it looks pretty close to what we need in order to fix the [root cause](#root-cause).
|
||||
|
||||
However, the legacy ARC approach means the service account for running the runner pod needs to have elevated permission to update the custom resource,
|
||||
which would be a big `NO` from a security point of view, since we may not trust the code running inside the runner pod.
|
||||
|
||||
## Possible Solution
|
||||
|
||||
The nature of the k8s controller-runtime means we might reconcile the resource based on stale cache data.
|
||||
|
||||
I think our goal for the solution should be:
|
||||
- Reduce wasteful HTTP requests on a scale-down as much as we can.
|
||||
- We can accept that we might make 1 or 2 wasteful requests to the Actions service, but we can't accept making 5/10+ of them.
|
||||
- See if we can reach feature parity with what the RunnerJobHook supports, without compromising on any security concerns.
|
||||
|
||||
Since the root cause of why the reconciliation can't skip an `EphemeralRunner` is that we don't know whether an `EphemeralRunner` is running a job,
|
||||
a simple thought is: how about we attach some info to the `EphemeralRunner` to indicate that it's currently running a job?
|
||||
|
||||
How about we send this info from the service to the auto-scaling-listener via the existing HTTP long-poll
|
||||
and let the listener patch the `EphemeralRunner.Status` to indicate it's running a job?
|
||||
> The listener is normally in a separate namespace with elevated permission and it's something we can trust.
|
||||
|
||||
Changes:
|
||||
- Introduce a new message type `JobStarted` (in addition to the existing `JobAvailable/JobAssigned/JobCompleted`) on the service side; the message is sent when a runner of the `RunnerScaleSet` gets assigned to a job, and
|
||||
`RequestId`, `RunnerId`, and `RunnerName` will be included in the message.
|
||||
- Add `RequestId (int)` to `EphemeralRunner.Status`, this will indicate which job the runner is running.
|
||||
- The `AutoScalingListener` will use the payload of this new message to patch `EphemeralRunners/RunnerName/Status` with the `RequestId`
|
||||
- When `EphemeralRunnerSet_controller` tries to find `EphemeralRunners` to delete on a scale-down, it will skip any `EphemeralRunner` that has `EphemeralRunner.Status.RequestId` set.
|
||||
- In the future, we can expose more info in this `JobStarted` message and introduce more properties under `EphemeralRunner.Status` to reach feature parity with legacy ARC's RunnerJobHook
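As a side effect, operators could also see at a glance which runners are busy, e.g. (a hypothetical sketch assuming the proposed `status.requestId` field is added to `EphemeralRunner`):

```bash
# Runners with a non-empty REQUEST_ID are currently assigned a job and would be
# skipped by EphemeralRunnerSet_controller when scaling down
kubectl get ephemeralrunners \
  -o custom-columns=NAME:.metadata.name,REQUEST_ID:.status.requestId
```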
|
||||
39
adrs/2023-02-02-automate-runner-updates.md
Normal file
@@ -0,0 +1,39 @@
|
||||
# Automate updating runner version
|
||||
|
||||
**Status**: Proposed
|
||||
|
||||
## Context
|
||||
|
||||
When a new [runner](https://github.com/actions/runner) version is released, new
|
||||
images need to be built in
|
||||
[actions-runner-controller/releases](https://github.com/actions-runner-controller/releases).
|
||||
This is currently started by the
|
||||
[release-runners](https://github.com/actions/actions-runner-controller/blob/master/.github/workflows/release-runners.yaml)
|
||||
workflow, although this only starts when the set of files containing the runner
|
||||
version is updated (and this is currently done manually).
|
||||
|
||||
## Decision
|
||||
|
||||
We can have another workflow running on a cadence (hourly seems sensible) and checking for new runner
|
||||
releases, creating a PR updating `RUNNER_VERSION` in:
|
||||
- `.github/workflows/release-runners.yaml`
|
||||
- `Makefile`
|
||||
- `runner/Makefile`
|
||||
- `test/e2e/e2e_test.go`
|
||||
|
||||
Once that PR is merged, the existing workflow will pick things up.
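The periodic check itself could be as simple as the following sketch (assuming the `gh` CLI is available in the workflow and that `RUNNER_VERSION` appears as a plain assignment in the Makefile; the real workflow would run this on a schedule and open the PR):

```bash
# Latest published runner version, without the leading "v"
latest=$(gh api repos/actions/runner/releases/latest --jq '.tag_name' | sed 's/^v//')

# Version currently pinned in the repository (the grep pattern is an assumption)
current=$(grep -Eo 'RUNNER_VERSION[ ?:]*= *[0-9.]+' Makefile | grep -Eo '[0-9]+\.[0-9.]+' | head -n1)

if [ "$latest" != "$current" ]; then
  echo "Runner $latest is out (repo pins $current): bump RUNNER_VERSION and open a PR"
fi
```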
|
||||
|
||||
## Consequences
|
||||
|
||||
We don't have to add an extra step to the runner release process and a direct
|
||||
dependency on ARC. Since images won't be built until the generated PR is merged
|
||||
we still have room to wait before triggering a build should there be any
|
||||
problems with the runner release.
|
||||
|
||||
## Considered alternatives
|
||||
|
||||
We also considered firing the workflow to create the PR via
|
||||
`repository_dispatch` as part of the release process of runner itself, but we
|
||||
discarded it because that would have required a PAT or a GitHub app with `repo`
|
||||
scope within the Actions org and would have added a new direct dependency on the
|
||||
runner side.
|
||||
18
adrs/yyyy-mm-dd-TEMPLATE.md
Normal file
@@ -0,0 +1,18 @@
|
||||
# Title
|
||||
|
||||
<!-- ADR titles should typically be imperative sentences. -->
|
||||
|
||||
**Status**: (Proposed|Accepted|Rejected|Superseded|Deprecated)
|
||||
|
||||
## Context
|
||||
|
||||
*What is the issue or background knowledge necessary for future readers
|
||||
to understand why this ADR was written?*
|
||||
|
||||
## Decision
|
||||
|
||||
*What is the change being proposed? / How will it be implemented?*
|
||||
|
||||
## Consequences
|
||||
|
||||
*What becomes easier or more difficult to do because of this change?*
|
||||
@@ -0,0 +1,91 @@
|
||||
/*
|
||||
Copyright 2020 The actions-runner-controller authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// AutoscalingListenerSpec defines the desired state of AutoscalingListener
|
||||
type AutoscalingListenerSpec struct {
|
||||
// Required
|
||||
GitHubConfigUrl string `json:"githubConfigUrl,omitempty"`
|
||||
|
||||
// Required
|
||||
GitHubConfigSecret string `json:"githubConfigSecret,omitempty"`
|
||||
|
||||
// Required
|
||||
RunnerScaleSetId int `json:"runnerScaleSetId,omitempty"`
|
||||
|
||||
// Required
|
||||
AutoscalingRunnerSetNamespace string `json:"autoscalingRunnerSetNamespace,omitempty"`
|
||||
|
||||
// Required
|
||||
AutoscalingRunnerSetName string `json:"autoscalingRunnerSetName,omitempty"`
|
||||
|
||||
// Required
|
||||
EphemeralRunnerSetName string `json:"ephemeralRunnerSetName,omitempty"`
|
||||
|
||||
// Required
|
||||
// +kubebuilder:validation:Minimum:=0
|
||||
MaxRunners int `json:"maxRunners,omitempty"`
|
||||
|
||||
// Required
|
||||
// +kubebuilder:validation:Minimum:=0
|
||||
MinRunners int `json:"minRunners,omitempty"`
|
||||
|
||||
// Required
|
||||
Image string `json:"image,omitempty"`
|
||||
|
||||
// Required
|
||||
ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
|
||||
|
||||
// +optional
|
||||
Proxy *ProxyConfig `json:"proxy,omitempty"`
|
||||
}
|
||||
|
||||
// AutoscalingListenerStatus defines the observed state of AutoscalingListener
|
||||
type AutoscalingListenerStatus struct{}
|
||||
|
||||
//+kubebuilder:object:root=true
|
||||
//+kubebuilder:subresource:status
|
||||
//+kubebuilder:printcolumn:JSONPath=".spec.githubConfigUrl",name=GitHub Configure URL,type=string
|
||||
//+kubebuilder:printcolumn:JSONPath=".spec.autoscalingRunnerSetNamespace",name=AutoscalingRunnerSet Namespace,type=string
|
||||
//+kubebuilder:printcolumn:JSONPath=".spec.autoscalingRunnerSetName",name=AutoscalingRunnerSet Name,type=string
|
||||
|
||||
// AutoscalingListener is the Schema for the autoscalinglisteners API
|
||||
type AutoscalingListener struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec AutoscalingListenerSpec `json:"spec,omitempty"`
|
||||
Status AutoscalingListenerStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
//+kubebuilder:object:root=true
|
||||
|
||||
// AutoscalingListenerList contains a list of AutoscalingListener
|
||||
type AutoscalingListenerList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
Items []AutoscalingListener `json:"items"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
SchemeBuilder.Register(&AutoscalingListener{}, &AutoscalingListenerList{})
|
||||
}
|
||||
234
apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go
Normal file
@@ -0,0 +1,234 @@
/*
Copyright 2020 The actions-runner-controller authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/actions/actions-runner-controller/hash"
	"golang.org/x/net/http/httpproxy"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
//+kubebuilder:printcolumn:JSONPath=".spec.minRunners",name=Minimum Runners,type=number
//+kubebuilder:printcolumn:JSONPath=".spec.maxRunners",name=Maximum Runners,type=number
//+kubebuilder:printcolumn:JSONPath=".status.currentRunners",name=Current Runners,type=number
//+kubebuilder:printcolumn:JSONPath=".status.state",name=State,type=string

// AutoscalingRunnerSet is the Schema for the autoscalingrunnersets API
type AutoscalingRunnerSet struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   AutoscalingRunnerSetSpec   `json:"spec,omitempty"`
	Status AutoscalingRunnerSetStatus `json:"status,omitempty"`
}

// AutoscalingRunnerSetSpec defines the desired state of AutoscalingRunnerSet
type AutoscalingRunnerSetSpec struct {
	// Required
	GitHubConfigUrl string `json:"githubConfigUrl,omitempty"`

	// Required
	GitHubConfigSecret string `json:"githubConfigSecret,omitempty"`

	// +optional
	RunnerGroup string `json:"runnerGroup,omitempty"`

	// +optional
	Proxy *ProxyConfig `json:"proxy,omitempty"`

	// +optional
	GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"`

	// Required
	Template corev1.PodTemplateSpec `json:"template,omitempty"`

	// +optional
	// +kubebuilder:validation:Minimum:=0
	MaxRunners *int `json:"maxRunners,omitempty"`

	// +optional
	// +kubebuilder:validation:Minimum:=0
	MinRunners *int `json:"minRunners,omitempty"`
}

type GitHubServerTLSConfig struct {
	// Required
	RootCAsConfigMapRef string `json:"certConfigMapRef,omitempty"`
}

type ProxyConfig struct {
	// +optional
	HTTP *ProxyServerConfig `json:"http,omitempty"`

	// +optional
	HTTPS *ProxyServerConfig `json:"https,omitempty"`

	// +optional
	NoProxy []string `json:"noProxy,omitempty"`
}

func (c *ProxyConfig) toHTTPProxyConfig(secretFetcher func(string) (*corev1.Secret, error)) (*httpproxy.Config, error) {
	config := &httpproxy.Config{
		NoProxy: strings.Join(c.NoProxy, ","),
	}

	if c.HTTP != nil {
		u, err := url.Parse(c.HTTP.Url)
		if err != nil {
			return nil, fmt.Errorf("failed to parse proxy http url %q: %w", c.HTTP.Url, err)
		}

		if c.HTTP.CredentialSecretRef != "" {
			secret, err := secretFetcher(c.HTTP.CredentialSecretRef)
			if err != nil {
				return nil, fmt.Errorf(
					"failed to get secret %s for http proxy: %w",
					c.HTTP.CredentialSecretRef,
					err,
				)
			}

			u.User = url.UserPassword(
				string(secret.Data["username"]),
				string(secret.Data["password"]),
			)
		}

		config.HTTPProxy = u.String()
	}

	if c.HTTPS != nil {
		u, err := url.Parse(c.HTTPS.Url)
		if err != nil {
			return nil, fmt.Errorf("failed to parse proxy https url %q: %w", c.HTTPS.Url, err)
		}

		if c.HTTPS.CredentialSecretRef != "" {
			secret, err := secretFetcher(c.HTTPS.CredentialSecretRef)
			if err != nil {
				return nil, fmt.Errorf(
					"failed to get secret %s for https proxy: %w",
					c.HTTPS.CredentialSecretRef,
					err,
				)
			}

			u.User = url.UserPassword(
				string(secret.Data["username"]),
				string(secret.Data["password"]),
			)
		}

		config.HTTPSProxy = u.String()
	}

	return config, nil
}

func (c *ProxyConfig) ToSecretData(secretFetcher func(string) (*corev1.Secret, error)) (map[string][]byte, error) {
	config, err := c.toHTTPProxyConfig(secretFetcher)
	if err != nil {
		return nil, err
	}

	data := map[string][]byte{}
	data["http_proxy"] = []byte(config.HTTPProxy)
	data["https_proxy"] = []byte(config.HTTPSProxy)
	data["no_proxy"] = []byte(config.NoProxy)

	return data, nil
}

func (c *ProxyConfig) ProxyFunc(secretFetcher func(string) (*corev1.Secret, error)) (func(*http.Request) (*url.URL, error), error) {
	config, err := c.toHTTPProxyConfig(secretFetcher)
	if err != nil {
		return nil, err
	}

	proxyFunc := func(req *http.Request) (*url.URL, error) {
		return config.ProxyFunc()(req.URL)
	}

	return proxyFunc, nil
}

type ProxyServerConfig struct {
	// Required
	Url string `json:"url,omitempty"`

	// +optional
	CredentialSecretRef string `json:"credentialSecretRef,omitempty"`
}

// AutoscalingRunnerSetStatus defines the observed state of AutoscalingRunnerSet
type AutoscalingRunnerSetStatus struct {
	// +optional
	CurrentRunners int `json:"currentRunners,omitempty"`

	// +optional
	State string `json:"state,omitempty"`
}

func (ars *AutoscalingRunnerSet) ListenerSpecHash() string {
	type listenerSpec = AutoscalingRunnerSetSpec
	arsSpec := ars.Spec.DeepCopy()
	spec := arsSpec
	return hash.ComputeTemplateHash(&spec)
}

func (ars *AutoscalingRunnerSet) RunnerSetSpecHash() string {
	type runnerSetSpec struct {
		GitHubConfigUrl    string
		GitHubConfigSecret string
		RunnerGroup        string
		Proxy              *ProxyConfig
		GitHubServerTLS    *GitHubServerTLSConfig
		Template           corev1.PodTemplateSpec
	}
	spec := &runnerSetSpec{
		GitHubConfigUrl:    ars.Spec.GitHubConfigUrl,
		GitHubConfigSecret: ars.Spec.GitHubConfigSecret,
		RunnerGroup:        ars.Spec.RunnerGroup,
		Proxy:              ars.Spec.Proxy,
		GitHubServerTLS:    ars.Spec.GitHubServerTLS,
		Template:           ars.Spec.Template,
	}
	return hash.ComputeTemplateHash(&spec)
}

//+kubebuilder:object:root=true

// AutoscalingRunnerSetList contains a list of AutoscalingRunnerSet
type AutoscalingRunnerSetList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []AutoscalingRunnerSet `json:"items"`
}

func init() {
	SchemeBuilder.Register(&AutoscalingRunnerSet{}, &AutoscalingRunnerSetList{})
}
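As a rough usage sketch of the ProxyConfig helpers defined above (they are exercised the same way by proxy_config_test.go later in this diff), assuming a stub secret fetcher in place of a real Kubernetes Secret lookup and a placeholder proxy host:

package main

import (
	"fmt"
	"net/http"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	proxy := &v1alpha1.ProxyConfig{
		HTTP:    &v1alpha1.ProxyServerConfig{Url: "http://proxy.example.com:8080", CredentialSecretRef: "proxy-creds"},
		NoProxy: []string{"internal.example.com"},
	}

	// Stub fetcher; a controller would instead read the referenced Secret from the cluster.
	fetch := func(name string) (*corev1.Secret, error) {
		return &corev1.Secret{Data: map[string][]byte{
			"username": []byte("user"),
			"password": []byte("pass"),
		}}, nil
	}

	// Render the http_proxy/https_proxy/no_proxy entries that would land in a runner pod's env Secret.
	data, err := proxy.ToSecretData(fetch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data["http_proxy"]), string(data["no_proxy"]))

	// Or wire the same config into an HTTP client used to reach GitHub through the proxy.
	proxyFunc, err := proxy.ProxyFunc(fetch)
	if err != nil {
		panic(err)
	}
	client := &http.Client{Transport: &http.Transport{Proxy: proxyFunc}}
	_ = client
}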
133
apis/actions.github.com/v1alpha1/ephemeralrunner_types.go
Normal file
@@ -0,0 +1,133 @@
/*
Copyright 2020 The actions-runner-controller authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.githubConfigUrl",name="GitHub Config URL",type=string
// +kubebuilder:printcolumn:JSONPath=".status.runnerId",name=RunnerId,type=number
// +kubebuilder:printcolumn:JSONPath=".status.phase",name=Status,type=string
// +kubebuilder:printcolumn:JSONPath=".status.jobRepositoryName",name=JobRepository,type=string
// +kubebuilder:printcolumn:JSONPath=".status.jobWorkflowRef",name=JobWorkflowRef,type=string
// +kubebuilder:printcolumn:JSONPath=".status.workflowRunId",name=WorkflowRunId,type=number
// +kubebuilder:printcolumn:JSONPath=".status.jobDisplayName",name=JobDisplayName,type=string
// +kubebuilder:printcolumn:JSONPath=".status.message",name=Message,type=string
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

// EphemeralRunner is the Schema for the ephemeralrunners API
type EphemeralRunner struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   EphemeralRunnerSpec   `json:"spec,omitempty"`
	Status EphemeralRunnerStatus `json:"status,omitempty"`
}

// EphemeralRunnerSpec defines the desired state of EphemeralRunner
type EphemeralRunnerSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// +required
	GitHubConfigUrl string `json:"githubConfigUrl,omitempty"`

	// +required
	GitHubConfigSecret string `json:"githubConfigSecret,omitempty"`

	// +required
	RunnerScaleSetId int `json:"runnerScaleSetId,omitempty"`

	// +optional
	Proxy *ProxyConfig `json:"proxy,omitempty"`

	// +optional
	ProxySecretRef string `json:"proxySecretRef,omitempty"`

	// +optional
	GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"`

	// +required
	corev1.PodTemplateSpec `json:",inline"`
}

// EphemeralRunnerStatus defines the observed state of EphemeralRunner
type EphemeralRunnerStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Turns true only if the runner is online.
	// +optional
	Ready bool `json:"ready"`
	// Phase describes phases where EphemeralRunner can be in.
	// The underlying type is a PodPhase, but the meaning is more restrictive
	//
	// The PodFailed phase should be set only when EphemeralRunner fails to start
	// after multiple retries. That signals that this EphemeralRunner won't work,
	// and manual inspection is required
	//
	// The PodSucceeded phase should be set only when confirmed that EphemeralRunner
	// actually executed the job and has been removed from the service.
	// +optional
	Phase corev1.PodPhase `json:"phase,omitempty"`
	// +optional
	Reason string `json:"reason,omitempty"`
	// +optional
	Message string `json:"message,omitempty"`

	// +optional
	RunnerId int `json:"runnerId,omitempty"`
	// +optional
	RunnerName string `json:"runnerName,omitempty"`
	// +optional
	RunnerJITConfig string `json:"runnerJITConfig,omitempty"`

	// +optional
	Failures map[string]bool `json:"failures,omitempty"`

	// +optional
	JobRequestId int64 `json:"jobRequestId,omitempty"`

	// +optional
	JobRepositoryName string `json:"jobRepositoryName,omitempty"`

	// +optional
	JobWorkflowRef string `json:"jobWorkflowRef,omitempty"`

	// +optional
	WorkflowRunId int64 `json:"workflowRunId,omitempty"`

	// +optional
	JobDisplayName string `json:"jobDisplayName,omitempty"`
}

//+kubebuilder:object:root=true

// EphemeralRunnerList contains a list of EphemeralRunner
type EphemeralRunnerList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []EphemeralRunner `json:"items"`
}

func init() {
	SchemeBuilder.Register(&EphemeralRunner{}, &EphemeralRunnerList{})
}
61
apis/actions.github.com/v1alpha1/ephemeralrunnerset_types.go
Normal file
@@ -0,0 +1,61 @@
/*
Copyright 2020 The actions-runner-controller authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EphemeralRunnerSetSpec defines the desired state of EphemeralRunnerSet
type EphemeralRunnerSetSpec struct {
	// Replicas is the number of desired EphemeralRunner resources in the k8s namespace.
	Replicas int `json:"replicas,omitempty"`

	EphemeralRunnerSpec EphemeralRunnerSpec `json:"ephemeralRunnerSpec,omitempty"`
}

// EphemeralRunnerSetStatus defines the observed state of EphemeralRunnerSet
type EphemeralRunnerSetStatus struct {
	// CurrentReplicas is the number of currently running EphemeralRunner resources being managed by this EphemeralRunnerSet.
	CurrentReplicas int `json:"currentReplicas,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name="DesiredReplicas",type="integer"
// +kubebuilder:printcolumn:JSONPath=".status.currentReplicas",name="CurrentReplicas",type="integer"
// EphemeralRunnerSet is the Schema for the ephemeralrunnersets API
type EphemeralRunnerSet struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   EphemeralRunnerSetSpec   `json:"spec,omitempty"`
	Status EphemeralRunnerSetStatus `json:"status,omitempty"`
}

//+kubebuilder:object:root=true

// EphemeralRunnerSetList contains a list of EphemeralRunnerSet
type EphemeralRunnerSetList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []EphemeralRunnerSet `json:"items"`
}

func init() {
	SchemeBuilder.Register(&EphemeralRunnerSet{}, &EphemeralRunnerSetList{})
}
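A minimal sketch of how an EphemeralRunnerSet wraps a replica count around a full EphemeralRunnerSpec; all names, the namespace, and the runner image below are placeholders chosen for illustration, not values taken from this diff:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
)

func main() {
	ers := &v1alpha1.EphemeralRunnerSet{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-runner-set", Namespace: "arc-runners"},
		Spec: v1alpha1.EphemeralRunnerSetSpec{
			Replicas: 2,
			EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
				GitHubConfigUrl:    "https://github.com/my-org/my-repo",
				GitHubConfigSecret: "github-config-secret",
				RunnerScaleSetId:   1,
				// EphemeralRunnerSpec inlines a PodTemplateSpec, so the runner pod is described here.
				PodTemplateSpec: corev1.PodTemplateSpec{
					Spec: corev1.PodSpec{
						Containers: []corev1.Container{{Name: "runner", Image: "ghcr.io/actions/actions-runner:latest"}},
					},
				},
			},
		},
	}
	fmt.Println(ers.Name, ers.Spec.Replicas)
}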
36
apis/actions.github.com/v1alpha1/groupversion_info.go
Normal file
@@ -0,0 +1,36 @@
/*
Copyright 2020 The actions-runner-controller authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1alpha1 contains API Schema definitions for the actions.github.com v1alpha1 API group
// +kubebuilder:object:generate=true
// +groupName=actions.github.com
package v1alpha1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is group version used to register these objects
	GroupVersion = schema.GroupVersion{Group: "actions.github.com", Version: "v1alpha1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
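A minimal sketch of how the AddToScheme registration declared above is typically combined with the built-in client-go scheme when a manager binary builds its runtime.Scheme; this wiring is standard controller-runtime usage rather than code from this diff:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	// Built-in types (Pods, Secrets, ...) that the controllers also watch.
	if err := clientgoscheme.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// actions.github.com/v1alpha1 types registered via this file's SchemeBuilder.
	if err := v1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.Recognizes(v1alpha1.GroupVersion.WithKind("AutoscalingRunnerSet")))
}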
118
apis/actions.github.com/v1alpha1/proxy_config_test.go
Normal file
@@ -0,0 +1,118 @@
package v1alpha1_test

import (
	"net/http"
	"testing"

	corev1 "k8s.io/api/core/v1"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestProxyConfig_ToSecret(t *testing.T) {
	config := &v1alpha1.ProxyConfig{
		HTTP: &v1alpha1.ProxyServerConfig{
			Url:                 "http://proxy.example.com:8080",
			CredentialSecretRef: "my-secret",
		},
		HTTPS: &v1alpha1.ProxyServerConfig{
			Url:                 "https://proxy.example.com:8080",
			CredentialSecretRef: "my-secret",
		},
		NoProxy: []string{
			"noproxy.example.com",
			"noproxy2.example.com",
		},
	}

	secretFetcher := func(string) (*corev1.Secret, error) {
		return &corev1.Secret{
			Data: map[string][]byte{
				"username": []byte("username"),
				"password": []byte("password"),
			},
		}, nil
	}

	result, err := config.ToSecretData(secretFetcher)
	require.NoError(t, err)
	require.NotNil(t, result)

	assert.Equal(t, "http://username:password@proxy.example.com:8080", string(result["http_proxy"]))
	assert.Equal(t, "https://username:password@proxy.example.com:8080", string(result["https_proxy"]))
	assert.Equal(t, "noproxy.example.com,noproxy2.example.com", string(result["no_proxy"]))
}

func TestProxyConfig_ProxyFunc(t *testing.T) {
	config := &v1alpha1.ProxyConfig{
		HTTP: &v1alpha1.ProxyServerConfig{
			Url:                 "http://proxy.example.com:8080",
			CredentialSecretRef: "my-secret",
		},
		HTTPS: &v1alpha1.ProxyServerConfig{
			Url:                 "https://proxy.example.com:8080",
			CredentialSecretRef: "my-secret",
		},
		NoProxy: []string{
			"noproxy.example.com",
			"noproxy2.example.com",
		},
	}

	secretFetcher := func(string) (*corev1.Secret, error) {
		return &corev1.Secret{
			Data: map[string][]byte{
				"username": []byte("username"),
				"password": []byte("password"),
			},
		}, nil
	}

	result, err := config.ProxyFunc(secretFetcher)
	require.NoError(t, err)

	tests := []struct {
		name string
		in   string
		out  string
	}{
		{
			name: "http target",
			in:   "http://target.com",
			out:  "http://username:password@proxy.example.com:8080",
		},
		{
			name: "https target",
			in:   "https://target.com",
			out:  "https://username:password@proxy.example.com:8080",
		},
		{
			name: "no proxy",
			in:   "https://noproxy.example.com",
			out:  "",
		},
		{
			name: "no proxy 2",
			in:   "https://noproxy2.example.com",
			out:  "",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			req, err := http.NewRequest("GET", test.in, nil)
			require.NoError(t, err)
			u, err := result(req)
			require.NoError(t, err)

			if test.out == "" {
				assert.Nil(t, u)
				return
			}

			assert.Equal(t, test.out, u.String())
		})
	}
}
493
apis/actions.github.com/v1alpha1/zz_generated.deepcopy.go
Normal file
@@ -0,0 +1,493 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright 2020 The actions-runner-controller authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by controller-gen. DO NOT EDIT.

package v1alpha1

import (
	"k8s.io/api/core/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutoscalingListener) DeepCopyInto(out *AutoscalingListener) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	out.Status = in.Status
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingListener.
func (in *AutoscalingListener) DeepCopy() *AutoscalingListener {
	if in == nil {
		return nil
	}
	out := new(AutoscalingListener)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AutoscalingListener) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutoscalingListenerList) DeepCopyInto(out *AutoscalingListenerList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]AutoscalingListener, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingListenerList.
func (in *AutoscalingListenerList) DeepCopy() *AutoscalingListenerList {
	if in == nil {
		return nil
	}
	out := new(AutoscalingListenerList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AutoscalingListenerList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutoscalingListenerSpec) DeepCopyInto(out *AutoscalingListenerSpec) {
	*out = *in
	if in.ImagePullSecrets != nil {
		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
		*out = make([]v1.LocalObjectReference, len(*in))
		copy(*out, *in)
	}
	if in.Proxy != nil {
		in, out := &in.Proxy, &out.Proxy
		*out = new(ProxyConfig)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingListenerSpec.
func (in *AutoscalingListenerSpec) DeepCopy() *AutoscalingListenerSpec {
	if in == nil {
		return nil
	}
	out := new(AutoscalingListenerSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutoscalingListenerStatus) DeepCopyInto(out *AutoscalingListenerStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingListenerStatus.
func (in *AutoscalingListenerStatus) DeepCopy() *AutoscalingListenerStatus {
	if in == nil {
		return nil
	}
	out := new(AutoscalingListenerStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutoscalingRunnerSet) DeepCopyInto(out *AutoscalingRunnerSet) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	out.Status = in.Status
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingRunnerSet.
func (in *AutoscalingRunnerSet) DeepCopy() *AutoscalingRunnerSet {
	if in == nil {
		return nil
	}
	out := new(AutoscalingRunnerSet)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AutoscalingRunnerSet) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutoscalingRunnerSetList) DeepCopyInto(out *AutoscalingRunnerSetList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]AutoscalingRunnerSet, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingRunnerSetList.
func (in *AutoscalingRunnerSetList) DeepCopy() *AutoscalingRunnerSetList {
	if in == nil {
		return nil
	}
	out := new(AutoscalingRunnerSetList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AutoscalingRunnerSetList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutoscalingRunnerSetSpec) DeepCopyInto(out *AutoscalingRunnerSetSpec) {
	*out = *in
	if in.Proxy != nil {
		in, out := &in.Proxy, &out.Proxy
		*out = new(ProxyConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.GitHubServerTLS != nil {
		in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
		*out = new(GitHubServerTLSConfig)
		**out = **in
	}
	in.Template.DeepCopyInto(&out.Template)
	if in.MaxRunners != nil {
		in, out := &in.MaxRunners, &out.MaxRunners
		*out = new(int)
		**out = **in
	}
	if in.MinRunners != nil {
		in, out := &in.MinRunners, &out.MinRunners
		*out = new(int)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingRunnerSetSpec.
func (in *AutoscalingRunnerSetSpec) DeepCopy() *AutoscalingRunnerSetSpec {
	if in == nil {
		return nil
	}
	out := new(AutoscalingRunnerSetSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutoscalingRunnerSetStatus) DeepCopyInto(out *AutoscalingRunnerSetStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingRunnerSetStatus.
func (in *AutoscalingRunnerSetStatus) DeepCopy() *AutoscalingRunnerSetStatus {
	if in == nil {
		return nil
	}
	out := new(AutoscalingRunnerSetStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralRunner) DeepCopyInto(out *EphemeralRunner) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunner.
func (in *EphemeralRunner) DeepCopy() *EphemeralRunner {
	if in == nil {
		return nil
	}
	out := new(EphemeralRunner)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EphemeralRunner) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralRunnerList) DeepCopyInto(out *EphemeralRunnerList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]EphemeralRunner, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerList.
func (in *EphemeralRunnerList) DeepCopy() *EphemeralRunnerList {
	if in == nil {
		return nil
	}
	out := new(EphemeralRunnerList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EphemeralRunnerList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralRunnerSet) DeepCopyInto(out *EphemeralRunnerSet) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	out.Status = in.Status
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerSet.
func (in *EphemeralRunnerSet) DeepCopy() *EphemeralRunnerSet {
	if in == nil {
		return nil
	}
	out := new(EphemeralRunnerSet)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EphemeralRunnerSet) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralRunnerSetList) DeepCopyInto(out *EphemeralRunnerSetList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]EphemeralRunnerSet, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerSetList.
func (in *EphemeralRunnerSetList) DeepCopy() *EphemeralRunnerSetList {
	if in == nil {
		return nil
	}
	out := new(EphemeralRunnerSetList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EphemeralRunnerSetList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralRunnerSetSpec) DeepCopyInto(out *EphemeralRunnerSetSpec) {
	*out = *in
	in.EphemeralRunnerSpec.DeepCopyInto(&out.EphemeralRunnerSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerSetSpec.
func (in *EphemeralRunnerSetSpec) DeepCopy() *EphemeralRunnerSetSpec {
	if in == nil {
		return nil
	}
	out := new(EphemeralRunnerSetSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralRunnerSetStatus) DeepCopyInto(out *EphemeralRunnerSetStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerSetStatus.
func (in *EphemeralRunnerSetStatus) DeepCopy() *EphemeralRunnerSetStatus {
	if in == nil {
		return nil
	}
	out := new(EphemeralRunnerSetStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralRunnerSpec) DeepCopyInto(out *EphemeralRunnerSpec) {
	*out = *in
	if in.Proxy != nil {
		in, out := &in.Proxy, &out.Proxy
		*out = new(ProxyConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.GitHubServerTLS != nil {
		in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
		*out = new(GitHubServerTLSConfig)
		**out = **in
	}
	in.PodTemplateSpec.DeepCopyInto(&out.PodTemplateSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerSpec.
func (in *EphemeralRunnerSpec) DeepCopy() *EphemeralRunnerSpec {
	if in == nil {
		return nil
	}
	out := new(EphemeralRunnerSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralRunnerStatus) DeepCopyInto(out *EphemeralRunnerStatus) {
	*out = *in
	if in.Failures != nil {
		in, out := &in.Failures, &out.Failures
		*out = make(map[string]bool, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerStatus.
func (in *EphemeralRunnerStatus) DeepCopy() *EphemeralRunnerStatus {
	if in == nil {
		return nil
	}
	out := new(EphemeralRunnerStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitHubServerTLSConfig) DeepCopyInto(out *GitHubServerTLSConfig) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubServerTLSConfig.
func (in *GitHubServerTLSConfig) DeepCopy() *GitHubServerTLSConfig {
	if in == nil {
		return nil
	}
	out := new(GitHubServerTLSConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) {
	*out = *in
	if in.HTTP != nil {
		in, out := &in.HTTP, &out.HTTP
		*out = new(ProxyServerConfig)
		**out = **in
	}
	if in.HTTPS != nil {
		in, out := &in.HTTPS, &out.HTTPS
		*out = new(ProxyServerConfig)
		**out = **in
	}
	if in.NoProxy != nil {
		in, out := &in.NoProxy, &out.NoProxy
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig.
func (in *ProxyConfig) DeepCopy() *ProxyConfig {
	if in == nil {
		return nil
	}
	out := new(ProxyConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProxyServerConfig) DeepCopyInto(out *ProxyServerConfig) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyServerConfig.
func (in *ProxyServerConfig) DeepCopy() *ProxyServerConfig {
	if in == nil {
		return nil
	}
	out := new(ProxyServerConfig)
	in.DeepCopyInto(out)
	return out
}
@@ -60,6 +60,9 @@ type HorizontalRunnerAutoscalerSpec struct {
	// The earlier a scheduled override is, the higher it is prioritized.
	// +optional
	ScheduledOverrides []ScheduledOverride `json:"scheduledOverrides,omitempty"`

	// +optional
	GitHubAPICredentialsFrom *GitHubAPICredentialsFrom `json:"githubAPICredentialsFrom,omitempty"`
}

type ScaleUpTrigger struct {
@@ -130,7 +133,7 @@ type ScaleTargetRef struct {

type MetricSpec struct {
	// Type is the type of metric to be used for autoscaling.
	// The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
	// It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
	Type string `json:"type,omitempty"`

	// RepositoryNames is the list of repository names to be used for calculating the metric.
@@ -170,7 +173,7 @@ type MetricSpec struct {
}

// ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
// A schedule can optionally be recurring, so that the correspoding override happens every day, week, month, or year.
// A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
type ScheduledOverride struct {
	// StartTime is the time at which the first override starts.
	StartTime metav1.Time `json:"startTime"`
@@ -76,6 +76,16 @@ type RunnerConfig struct {

	// +optional
	ContainerMode string `json:"containerMode,omitempty"`

	GitHubAPICredentialsFrom *GitHubAPICredentialsFrom `json:"githubAPICredentialsFrom,omitempty"`
}

type GitHubAPICredentialsFrom struct {
	SecretRef SecretReference `json:"secretRef,omitempty"`
}

type SecretReference struct {
	Name string `json:"name"`
}

// RunnerPodSpec defines the desired pod spec fields of the runner pod
@@ -160,6 +170,9 @@ type RunnerPodSpec struct {
	// +optional
	RuntimeClassName *string `json:"runtimeClassName,omitempty"`

	// +optional
	DnsPolicy corev1.DNSPolicy `json:"dnsPolicy,omitempty"`

	// +optional
	DnsConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"`

@@ -183,11 +196,6 @@ func (rs *RunnerSpec) Validate(rootPath *field.Path) field.ErrorList {
		errList = append(errList, field.Invalid(rootPath.Child("workVolumeClaimTemplate"), rs.WorkVolumeClaimTemplate, err.Error()))
	}

	err = rs.validateIsServiceAccountNameSet()
	if err != nil {
		errList = append(errList, field.Invalid(rootPath.Child("serviceAccountName"), rs.ServiceAccountName, err.Error()))
	}

	return errList
}

@@ -226,17 +234,6 @@ func (rs *RunnerSpec) validateWorkVolumeClaimTemplate() error {
	return rs.WorkVolumeClaimTemplate.validate()
}

func (rs *RunnerSpec) validateIsServiceAccountNameSet() error {
	if rs.ContainerMode != "kubernetes" {
		return nil
	}

	if rs.ServiceAccountName == "" {
		return errors.New("service account name is required if container mode is kubernetes")
	}
	return nil
}

// RunnerStatus defines the observed state of Runner
type RunnerStatus struct {
	// Turns true only if the runner pod is ready.
@@ -251,10 +248,60 @@ type RunnerStatus struct {
	// +optional
	Message string `json:"message,omitempty"`
	// +optional
	WorkflowStatus *WorkflowStatus `json:"workflow"`
	// +optional
	// +nullable
	LastRegistrationCheckTime *metav1.Time `json:"lastRegistrationCheckTime,omitempty"`
}

// WorkflowStatus contains various information that is propagated
// from GitHub Actions workflow run environment variables to
// ease monitoring workflow run/job/steps that are triggered on the runner.
type WorkflowStatus struct {
	// +optional
	// Name is the name of the workflow
	// that is triggered within the runner.
	// It corresponds to GITHUB_WORKFLOW defined in
	// https://docs.github.com/en/actions/learn-github-actions/environment-variables
	Name string `json:"name,omitempty"`
	// +optional
	// Repository is the owner and repository name of the workflow
	// that is triggered within the runner.
	// It corresponds to GITHUB_REPOSITORY defined in
	// https://docs.github.com/en/actions/learn-github-actions/environment-variables
	Repository string `json:"repository,omitempty"`
	// +optional
	// RepositoryOwner is the repository owner's name for the workflow
	// that is triggered within the runner.
	// It corresponds to GITHUB_REPOSITORY_OWNER defined in
	// https://docs.github.com/en/actions/learn-github-actions/environment-variables
	RepositoryOwner string `json:"repositoryOwner,omitempty"`
	// +optional
	// RunNumber is the unique number for the current workflow run
	// that is triggered within the runner.
	// It corresponds to GITHUB_RUN_NUMBER defined in
	// https://docs.github.com/en/actions/learn-github-actions/environment-variables
	RunNumber string `json:"runNumber,omitempty"`
	// +optional
	// RunID is the unique number for the current workflow run
	// that is triggered within the runner.
	// It corresponds to GITHUB_RUN_ID defined in
	// https://docs.github.com/en/actions/learn-github-actions/environment-variables
	RunID string `json:"runID,omitempty"`
	// +optional
	// Job is the name of the current job
	// that is triggered within the runner.
	// It corresponds to GITHUB_JOB defined in
	// https://docs.github.com/en/actions/learn-github-actions/environment-variables
	Job string `json:"job,omitempty"`
	// +optional
	// Action is the name of the current action or the step ID of the current step
	// that is triggered within the runner.
	// It corresponds to GITHUB_ACTION defined in
	// https://docs.github.com/en/actions/learn-github-actions/environment-variables
	Action string `json:"action,omitempty"`
}

// RunnerStatusRegistration contains runner registration status
type RunnerStatusRegistration struct {
	Enterprise string `json:"enterprise,omitempty"`
@@ -315,8 +362,12 @@ func (w *WorkVolumeClaimTemplate) V1VolumeMount(mountPath string) corev1.VolumeM
// +kubebuilder:printcolumn:JSONPath=".spec.enterprise",name=Enterprise,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.organization",name=Organization,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.repository",name=Repository,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.group",name=Group,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.labels",name=Labels,type=string
// +kubebuilder:printcolumn:JSONPath=".status.phase",name=Status,type=string
// +kubebuilder:printcolumn:JSONPath=".status.message",name=Message,type=string
// +kubebuilder:printcolumn:JSONPath=".status.workflow.repository",name=WF Repo,type=string
// +kubebuilder:printcolumn:JSONPath=".status.workflow.runID",name=WF Run,type=string
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

// Runner is the Schema for the runners API
@@ -338,11 +389,7 @@ func (r Runner) IsRegisterable() bool {
	}

	now := metav1.Now()
	if r.Status.Registration.ExpiresAt.Before(&now) {
		return false
	}

	return true
	return !r.Status.Registration.ExpiresAt.Before(&now)
}

// +kubebuilder:object:root=true
@@ -33,7 +33,7 @@ type RunnerDeploymentSpec struct {

	// EffectiveTime is the time the upstream controller requested to sync Replicas.
	// It is usually populated by the webhook-based autoscaler via HRA.
	// The value is inherited to RunnerRepicaSet(s) and used to prevent ephemeral runners from unnecessarily recreated.
	// The value is inherited to RunnerReplicaSet(s) and used to prevent ephemeral runners from unnecessarily recreated.
	//
	// +optional
	// +nullable
@@ -90,6 +90,22 @@ func (in *CheckRunSpec) DeepCopy() *CheckRunSpec {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitHubAPICredentialsFrom) DeepCopyInto(out *GitHubAPICredentialsFrom) {
	*out = *in
	out.SecretRef = in.SecretRef
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubAPICredentialsFrom.
func (in *GitHubAPICredentialsFrom) DeepCopy() *GitHubAPICredentialsFrom {
	if in == nil {
		return nil
	}
	out := new(GitHubAPICredentialsFrom)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitHubEventScaleUpTriggerSpec) DeepCopyInto(out *GitHubEventScaleUpTriggerSpec) {
	*out = *in
@@ -231,6 +247,11 @@ func (in *HorizontalRunnerAutoscalerSpec) DeepCopyInto(out *HorizontalRunnerAuto
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.GitHubAPICredentialsFrom != nil {
		in, out := &in.GitHubAPICredentialsFrom, &out.GitHubAPICredentialsFrom
		*out = new(GitHubAPICredentialsFrom)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerSpec.
@@ -425,6 +446,11 @@ func (in *RunnerConfig) DeepCopyInto(out *RunnerConfig) {
		*out = new(string)
		**out = **in
	}
	if in.GitHubAPICredentialsFrom != nil {
		in, out := &in.GitHubAPICredentialsFrom, &out.GitHubAPICredentialsFrom
		*out = new(GitHubAPICredentialsFrom)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerConfig.
@@ -1023,6 +1049,11 @@ func (in *RunnerSpec) DeepCopy() *RunnerSpec {
func (in *RunnerStatus) DeepCopyInto(out *RunnerStatus) {
	*out = *in
	in.Registration.DeepCopyInto(&out.Registration)
	if in.WorkflowStatus != nil {
		in, out := &in.WorkflowStatus, &out.WorkflowStatus
		*out = new(WorkflowStatus)
		**out = **in
	}
	if in.LastRegistrationCheckTime != nil {
		in, out := &in.LastRegistrationCheckTime, &out.LastRegistrationCheckTime
		*out = (*in).DeepCopy()
@@ -1136,6 +1167,21 @@ func (in *ScheduledOverride) DeepCopy() *ScheduledOverride {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
func (in *SecretReference) DeepCopy() *SecretReference {
	if in == nil {
		return nil
	}
	out := new(SecretReference)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkVolumeClaimTemplate) DeepCopyInto(out *WorkVolumeClaimTemplate) {
	*out = *in
@@ -1171,3 +1217,18 @@ func (in *WorkflowJobSpec) DeepCopy() *WorkflowJobSpec {
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkflowStatus) DeepCopyInto(out *WorkflowStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowStatus.
func (in *WorkflowStatus) DeepCopy() *WorkflowStatus {
	if in == nil {
		return nil
	}
	out := new(WorkflowStatus)
	in.DeepCopyInto(out)
	return out
}
4
build/version.go
Normal file
@@ -0,0 +1,4 @@
package build

// This is overridden at build-time using go-build ldflags. dev is the fallback value
var Version = "NA"
@@ -1,4 +1,5 @@
# This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow
all: true
lint-conf: charts/.ci/lint-config.yaml
chart-repos:
  - jetstack=https://charts.jetstack.io
23
charts/actions-runner-controller-2/.helmignore
Normal file
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
33
charts/actions-runner-controller-2/Chart.yaml
Normal file
@@ -0,0 +1,33 @@
apiVersion: v2
name: actions-runner-controller-2
description: A Helm chart for installing the actions-runner-controller CRDs

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.2.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.2.0"

home: https://github.com/actions/actions-runner-controller

sources:
  - "https://github.com/actions/actions-runner-controller"

maintainers:
  - name: actions
    url: https://github.com/actions
@@ -0,0 +1,120 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.7.0
  creationTimestamp: null
  name: autoscalinglisteners.actions.github.com
spec:
  group: actions.github.com
  names:
    kind: AutoscalingListener
    listKind: AutoscalingListenerList
    plural: autoscalinglisteners
    singular: autoscalinglistener
  scope: Namespaced
  versions:
  - additionalPrinterColumns:
    - jsonPath: .spec.githubConfigUrl
      name: GitHub Configure URL
      type: string
    - jsonPath: .spec.autoscalingRunnerSetNamespace
      name: AutoscalingRunnerSet Namespace
      type: string
    - jsonPath: .spec.autoscalingRunnerSetName
      name: AutoscalingRunnerSet Name
      type: string
    name: v1alpha1
    schema:
      openAPIV3Schema:
        description: AutoscalingListener is the Schema for the autoscalinglisteners API
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: AutoscalingListenerSpec defines the desired state of AutoscalingListener
            properties:
              autoscalingRunnerSetName:
                description: Required
                type: string
              autoscalingRunnerSetNamespace:
                description: Required
                type: string
              ephemeralRunnerSetName:
                description: Required
                type: string
              githubConfigSecret:
                description: Required
                type: string
              githubConfigUrl:
                description: Required
                type: string
              image:
                description: Required
                type: string
              imagePullSecrets:
                description: Required
                items:
                  description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
                  properties:
                    name:
                      description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
                      type: string
                  type: object
                type: array
              maxRunners:
                description: Required
                minimum: 0
                type: integer
              minRunners:
                description: Required
                minimum: 0
                type: integer
              proxy:
                properties:
                  http:
                    properties:
                      credentialSecretRef:
                        type: string
                      url:
                        description: Required
                        type: string
                    type: object
                  https:
                    properties:
                      credentialSecretRef:
                        type: string
                      url:
                        description: Required
                        type: string
                    type: object
                  noProxy:
                    items:
                      type: string
                    type: array
                type: object
              runnerScaleSetId:
                description: Required
                type: integer
            type: object
          status:
            description: AutoscalingListenerStatus defines the observed state of AutoscalingListener
            type: object
        type: object
    served: true
    storage: true
    subresources:
      status: {}
  preserveUnknownFields: false
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
3
charts/actions-runner-controller-2/templates/NOTES.txt
Normal file
@@ -0,0 +1,3 @@
Thank you for installing {{ .Chart.Name }}.

Your release is named {{ .Release.Name }}.
97
charts/actions-runner-controller-2/templates/_helpers.tpl
Normal file
@@ -0,0 +1,97 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "actions-runner-controller-2.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "actions-runner-controller-2.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "actions-runner-controller-2.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "actions-runner-controller-2.labels" -}}
helm.sh/chart: {{ include "actions-runner-controller-2.chart" . }}
{{ include "actions-runner-controller-2.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/part-of: {{ .Chart.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- range $k, $v := .Values.labels }}
{{ $k }}: {{ $v }}
{{- end }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "actions-runner-controller-2.selectorLabels" -}}
app.kubernetes.io/name: {{ include "actions-runner-controller-2.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "actions-runner-controller-2.serviceAccountName" -}}
{{- if eq .Values.serviceAccount.name "default"}}
{{- fail "serviceAccount.name cannot be set to 'default'" }}
{{- end }}
{{- if .Values.serviceAccount.create }}
{{- default (include "actions-runner-controller-2.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- if not .Values.serviceAccount.name }}
{{- fail "serviceAccount.name must be set if serviceAccount.create is false" }}
{{- else }}
{{- .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{- end }}

{{- define "actions-runner-controller-2.managerRoleName" -}}
{{- include "actions-runner-controller-2.fullname" . }}-manager-role
{{- end }}

{{- define "actions-runner-controller-2.managerRoleBinding" -}}
{{- include "actions-runner-controller-2.fullname" . }}-manager-rolebinding
{{- end }}

{{- define "actions-runner-controller-2.leaderElectionRoleName" -}}
{{- include "actions-runner-controller-2.fullname" . }}-leader-election-role
{{- end }}

{{- define "actions-runner-controller-2.leaderElectionRoleBinding" -}}
{{- include "actions-runner-controller-2.fullname" . }}-leader-election-rolebinding
{{- end }}

{{- define "actions-runner-controller-2.imagePullSecretsNames" -}}
{{- $names := list }}
{{- range $k, $v := . }}
{{- $names = append $names $v.name }}
{{- end }}
{{- $names | join ","}}
{{- end }}
101
charts/actions-runner-controller-2/templates/deployment.yaml
Normal file
@@ -0,0 +1,101 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "actions-runner-controller-2.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "actions-runner-controller-2.labels" . | nindent 4 }}
spec:
  replicas: {{ default 1 .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "actions-runner-controller-2.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/default-container: "manager"
        {{- with .Values.podAnnotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      labels:
        app.kubernetes.io/part-of: actions-runner-controller
        app.kubernetes.io/component: controller-manager
        app.kubernetes.io/version: {{ .Chart.Version }}
        {{- include "actions-runner-controller-2.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "actions-runner-controller-2.serviceAccountName" . }}
      {{- with .Values.podSecurityContext }}
      securityContext:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.priorityClassName }}
      priorityClassName: "{{ . }}"
      {{- end }}
      containers:
      - name: manager
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        args:
        - "--auto-scaling-runner-set-only"
        {{- if gt (int (default 1 .Values.replicaCount)) 1 }}
        - "--enable-leader-election"
        - "--leader-election-id={{ include "actions-runner-controller-2.fullname" . }}"
        {{- end }}
        {{- with .Values.imagePullSecrets }}
        - "--auto-scaler-image-pull-secrets={{ include "actions-runner-controller-2.imagePullSecretsNames" . }}"
        {{- end }}
        {{- with .Values.flags.logLevel }}
        - "--log-level={{ . }}"
        {{- end }}
        command:
        - "/manager"
        env:
        - name: CONTROLLER_MANAGER_POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: CONTROLLER_MANAGER_POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        {{- with .Values.env }}
        {{- if kindIs "slice" .Values.env }}
        {{- toYaml .Values.env | nindent 8 }}
        {{- else }}
        {{- range $key, $val := .Values.env }}
        - name: {{ $key }}
          value: {{ $val | quote }}
        {{- end }}
        {{- end }}
        {{- end }}
        {{- with .Values.resources }}
        resources:
          {{- toYaml . | nindent 12 }}
        {{- end }}
        {{- with .Values.securityContext }}
        securityContext:
          {{- toYaml . | nindent 12 }}
        {{- end }}
        volumeMounts:
        - mountPath: /tmp
          name: tmp
      terminationGracePeriodSeconds: 10
      volumes:
      - name: tmp
        emptyDir: {}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
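A note on the env handling in the deployment template above: the kindIs "slice" branch means .Values.env can be supplied either as a list of name/value entries (rendered verbatim via toYaml) or as a plain map (each key becomes a name/value entry). A minimal sketch of the two equivalent values-file forms, with illustrative variable names:

# env as a list (rendered verbatim)
env:
  - name: HTTP_PROXY
    value: "http://proxy.example.com:8080"

# env as a map (each key/value pair becomes a name/value entry)
env:
  HTTP_PROXY: "http://proxy.example.com:8080"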
@@ -0,0 +1,12 @@
{{- if gt (int (default 1 .Values.replicaCount)) 1 -}}
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ include "actions-runner-controller-2.leaderElectionRoleName" . }}
  namespace: {{ .Release.Namespace }}
rules:
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["get", "watch", "list", "delete", "update", "create"]
{{- end }}
@@ -0,0 +1,15 @@
{{- if gt (int (default 1 .Values.replicaCount)) 1 -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "actions-runner-controller-2.leaderElectionRoleBinding" . }}
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ include "actions-runner-controller-2.leaderElectionRoleName" . }}
subjects:
- kind: ServiceAccount
  name: {{ include "actions-runner-controller-2.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
{{- end }}
170
charts/actions-runner-controller-2/templates/manager_role.yaml
Normal file
@@ -0,0 +1,170 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "actions-runner-controller-2.managerRoleName" . }}
rules:
- apiGroups:
  - actions.github.com
  resources:
  - autoscalingrunnersets
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - actions.github.com
  resources:
  - autoscalingrunnersets/finalizers
  verbs:
  - update
- apiGroups:
  - actions.github.com
  resources:
  - autoscalingrunnersets/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - actions.github.com
  resources:
  - autoscalinglisteners
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - actions.github.com
  resources:
  - autoscalinglisteners/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - actions.github.com
  resources:
  - autoscalinglisteners/finalizers
  verbs:
  - update
- apiGroups:
  - actions.github.com
  resources:
  - ephemeralrunnersets
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - actions.github.com
  resources:
  - ephemeralrunnersets/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - actions.github.com
  resources:
  - ephemeralrunners
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - actions.github.com
  resources:
  - ephemeralrunners/finalizers
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - actions.github.com
  resources:
  - ephemeralrunners/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - ""
  resources:
  - pods/status
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - create
  - delete
  - get
  - list
  - watch
  - update
- apiGroups:
  - ""
  resources:
  - serviceaccounts
  verbs:
  - create
  - delete
  - get
  - list
  - watch
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - rolebindings
  verbs:
  - create
  - delete
  - get
  - update
  - list
  - watch
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - roles
  verbs:
  - create
  - delete
  - get
  - update
  - list
  - watch
@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "actions-runner-controller-2.managerRoleBinding" . }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ include "actions-runner-controller-2.managerRoleName" . }}
subjects:
- kind: ServiceAccount
  name: {{ include "actions-runner-controller-2.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "actions-runner-controller-2.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "actions-runner-controller-2.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}
533
charts/actions-runner-controller-2/tests/template_test.go
Normal file
@@ -0,0 +1,533 @@
|
||||
package tests
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/gruntwork-io/terratest/modules/helm"
|
||||
"github.com/gruntwork-io/terratest/modules/k8s"
|
||||
"github.com/gruntwork-io/terratest/modules/random"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gopkg.in/yaml.v2"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
)
|
||||
|
||||
type Chart struct {
|
||||
Version string `yaml:"version"`
|
||||
AppVersion string `yaml:"appVersion"`
|
||||
}
|
||||
|
||||
func TestTemplate_CreateServiceAccount(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-arc"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: map[string]string{
|
||||
"serviceAccount.create": "true",
|
||||
"serviceAccount.annotations.foo": "bar",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/serviceaccount.yaml"})
|
||||
|
||||
var serviceAccount corev1.ServiceAccount
|
||||
helm.UnmarshalK8SYaml(t, output, &serviceAccount)
|
||||
|
||||
assert.Equal(t, namespaceName, serviceAccount.Namespace)
|
||||
assert.Equal(t, "test-arc-actions-runner-controller-2", serviceAccount.Name)
|
||||
assert.Equal(t, "bar", string(serviceAccount.Annotations["foo"]))
|
||||
}
|
||||
|
||||
func TestTemplate_CreateServiceAccount_OverwriteName(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-arc"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: map[string]string{
|
||||
"serviceAccount.create": "true",
|
||||
"serviceAccount.name": "overwritten-name",
|
||||
"serviceAccount.annotations.foo": "bar",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/serviceaccount.yaml"})
|
||||
|
||||
var serviceAccount corev1.ServiceAccount
|
||||
helm.UnmarshalK8SYaml(t, output, &serviceAccount)
|
||||
|
||||
assert.Equal(t, namespaceName, serviceAccount.Namespace)
|
||||
assert.Equal(t, "overwritten-name", serviceAccount.Name)
|
||||
assert.Equal(t, "bar", string(serviceAccount.Annotations["foo"]))
|
||||
}
|
||||
|
||||
func TestTemplate_CreateServiceAccount_CannotUseDefaultServiceAccount(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-arc"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: map[string]string{
|
||||
"serviceAccount.create": "true",
|
||||
"serviceAccount.name": "default",
|
||||
"serviceAccount.annotations.foo": "bar",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/serviceaccount.yaml"})
|
||||
assert.ErrorContains(t, err, "serviceAccount.name cannot be set to 'default'", "We should get an error because the default service account cannot be used")
|
||||
}
|
||||
|
||||
func TestTemplate_NotCreateServiceAccount(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-arc"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: map[string]string{
|
||||
"serviceAccount.create": "false",
|
||||
"serviceAccount.name": "overwritten-name",
|
||||
"serviceAccount.annotations.foo": "bar",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/serviceaccount.yaml"})
|
||||
assert.ErrorContains(t, err, "could not find template templates/serviceaccount.yaml in chart", "We should get an error because the template should be skipped")
|
||||
}
|
||||
|
||||
func TestTemplate_NotCreateServiceAccount_ServiceAccountNotSet(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-arc"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: map[string]string{
|
||||
"serviceAccount.create": "false",
|
||||
"serviceAccount.annotations.foo": "bar",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
|
||||
assert.ErrorContains(t, err, "serviceAccount.name must be set if serviceAccount.create is false", "We should get an error because the default service account cannot be used")
|
||||
}
|
||||
|
||||
func TestTemplate_CreateManagerRole(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-arc"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: map[string]string{},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"})
|
||||
|
||||
var managerRole rbacv1.ClusterRole
|
||||
helm.UnmarshalK8SYaml(t, output, &managerRole)
|
||||
|
||||
assert.Empty(t, managerRole.Namespace, "ClusterRole should not have a namespace")
|
||||
assert.Equal(t, "test-arc-actions-runner-controller-2-manager-role", managerRole.Name)
|
||||
assert.Equal(t, 17, len(managerRole.Rules))
|
||||
}
|
||||
|
||||
func TestTemplate_ManagerRoleBinding(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-arc"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: map[string]string{
|
||||
"serviceAccount.create": "true",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role_binding.yaml"})
|
||||
|
||||
var managerRoleBinding rbacv1.ClusterRoleBinding
|
||||
helm.UnmarshalK8SYaml(t, output, &managerRoleBinding)
|
||||
|
||||
assert.Empty(t, managerRoleBinding.Namespace, "ClusterRoleBinding should not have a namespace")
|
||||
assert.Equal(t, "test-arc-actions-runner-controller-2-manager-rolebinding", managerRoleBinding.Name)
|
||||
assert.Equal(t, "test-arc-actions-runner-controller-2-manager-role", managerRoleBinding.RoleRef.Name)
|
||||
assert.Equal(t, "test-arc-actions-runner-controller-2", managerRoleBinding.Subjects[0].Name)
|
||||
assert.Equal(t, namespaceName, managerRoleBinding.Subjects[0].Namespace)
|
||||
}
|
||||
|
||||
func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
|
||||
require.NoError(t, err)
|
||||
|
||||
chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml"))
|
||||
require.NoError(t, err)
|
||||
|
||||
chart := new(Chart)
|
||||
err = yaml.Unmarshal(chartContent, chart)
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-arc"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: map[string]string{
|
||||
"image.tag": "dev",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
|
||||
|
||||
var deployment appsv1.Deployment
|
||||
helm.UnmarshalK8SYaml(t, output, &deployment)
|
||||
|
||||
assert.Equal(t, namespaceName, deployment.Namespace)
|
||||
assert.Equal(t, "test-arc-actions-runner-controller-2", deployment.Name)
|
||||
assert.Equal(t, "actions-runner-controller-2-"+chart.Version, deployment.Labels["helm.sh/chart"])
|
||||
assert.Equal(t, "actions-runner-controller-2", deployment.Labels["app.kubernetes.io/name"])
|
||||
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
|
||||
assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
|
||||
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
|
||||
|
||||
assert.Equal(t, int32(1), *deployment.Spec.Replicas)
|
||||
|
||||
assert.Equal(t, "actions-runner-controller-2", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
|
||||
assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"])
|
||||
|
||||
assert.Equal(t, "actions-runner-controller-2", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
|
||||
assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"])
|
||||
|
||||
assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 0)
|
||||
assert.Equal(t, "test-arc-actions-runner-controller-2", deployment.Spec.Template.Spec.ServiceAccountName)
|
||||
assert.Nil(t, deployment.Spec.Template.Spec.SecurityContext)
|
||||
assert.Empty(t, deployment.Spec.Template.Spec.PriorityClassName)
|
||||
assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Volumes, 1)
|
||||
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Volumes[0].Name)
|
||||
assert.NotNil(t, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 0)
|
||||
assert.Nil(t, deployment.Spec.Template.Spec.Affinity)
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 0)
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers, 1)
|
||||
assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name)
|
||||
assert.Equal(t, "ghcr.io/actions/actions-runner-controller-2:dev", deployment.Spec.Template.Spec.Containers[0].Image)
|
||||
assert.Equal(t, corev1.PullIfNotPresent, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy)
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
|
||||
assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 2)
|
||||
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
|
||||
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
|
||||
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAME", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
|
||||
assert.Equal(t, "metadata.name", deployment.Spec.Template.Spec.Containers[0].Env[0].ValueFrom.FieldRef.FieldPath)
|
||||
|
||||
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
|
||||
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
|
||||
|
||||
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
|
||||
assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
|
||||
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name)
|
||||
assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
|
||||
}
|
||||
|
||||
func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
|
||||
require.NoError(t, err)
|
||||
|
||||
chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml"))
|
||||
require.NoError(t, err)
|
||||
|
||||
chart := new(Chart)
|
||||
err = yaml.Unmarshal(chartContent, chart)
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-arc"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: map[string]string{
|
||||
"labels.foo": "bar",
|
||||
"labels.github": "actions",
|
||||
"replicaCount": "1",
|
||||
"image.pullPolicy": "Always",
|
||||
"image.tag": "dev",
|
||||
"imagePullSecrets[0].name": "dockerhub",
|
||||
"nameOverride": "actions-runner-controller-2-override",
|
||||
"fullnameOverride": "actions-runner-controller-2-fullname-override",
|
||||
"serviceAccount.name": "actions-runner-controller-2-sa",
|
||||
"podAnnotations.foo": "bar",
|
||||
"podSecurityContext.fsGroup": "1000",
|
||||
"securityContext.runAsUser": "1000",
|
||||
"securityContext.runAsNonRoot": "true",
|
||||
"resources.limits.cpu": "500m",
|
||||
"nodeSelector.foo": "bar",
|
||||
"tolerations[0].key": "foo",
|
||||
"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key": "foo",
|
||||
"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator": "bar",
|
||||
"priorityClassName": "test-priority-class",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
|
||||
|
||||
var deployment appsv1.Deployment
|
||||
helm.UnmarshalK8SYaml(t, output, &deployment)
|
||||
|
||||
assert.Equal(t, namespaceName, deployment.Namespace)
|
||||
assert.Equal(t, "actions-runner-controller-2-fullname-override", deployment.Name)
|
||||
assert.Equal(t, "actions-runner-controller-2-"+chart.Version, deployment.Labels["helm.sh/chart"])
|
||||
assert.Equal(t, "actions-runner-controller-2-override", deployment.Labels["app.kubernetes.io/name"])
|
||||
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
|
||||
assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
|
||||
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
|
||||
assert.Equal(t, "bar", deployment.Labels["foo"])
|
||||
assert.Equal(t, "actions", deployment.Labels["github"])
|
||||
|
||||
assert.Equal(t, int32(1), *deployment.Spec.Replicas)
|
||||
|
||||
assert.Equal(t, "actions-runner-controller-2-override", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
|
||||
assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"])
|
||||
|
||||
assert.Equal(t, "actions-runner-controller-2-override", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
|
||||
assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"])
|
||||
|
||||
assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"])
|
||||
assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1)
|
||||
assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name)
|
||||
assert.Equal(t, "actions-runner-controller-2-sa", deployment.Spec.Template.Spec.ServiceAccountName)
|
||||
assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.SecurityContext.FSGroup)
|
||||
assert.Equal(t, "test-priority-class", deployment.Spec.Template.Spec.PriorityClassName)
|
||||
assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Volumes, 1)
|
||||
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Volumes[0].Name)
|
||||
assert.NotNil(t, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 1)
|
||||
assert.Equal(t, "bar", deployment.Spec.Template.Spec.NodeSelector["foo"])
|
||||
|
||||
assert.NotNil(t, deployment.Spec.Template.Spec.Affinity.NodeAffinity)
|
||||
assert.Equal(t, "foo", deployment.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key)
|
||||
assert.Equal(t, "bar", string(deployment.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Operator))
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 1)
|
||||
assert.Equal(t, "foo", deployment.Spec.Template.Spec.Tolerations[0].Key)
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers, 1)
|
||||
assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name)
|
||||
assert.Equal(t, "ghcr.io/actions/actions-runner-controller-2:dev", deployment.Spec.Template.Spec.Containers[0].Image)
|
||||
assert.Equal(t, corev1.PullAlways, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy)
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
|
||||
assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3)
|
||||
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
|
||||
assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1])
|
||||
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2])
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
|
||||
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAME", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
|
||||
assert.Equal(t, "metadata.name", deployment.Spec.Template.Spec.Containers[0].Env[0].ValueFrom.FieldRef.FieldPath)
|
||||
|
||||
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
|
||||
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
|
||||
|
||||
assert.Equal(t, "500m", deployment.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String())
|
||||
assert.True(t, *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsNonRoot)
|
||||
assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsUser)
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
|
||||
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name)
|
||||
assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
|
||||
}
|
||||
|
||||
func TestTemplate_EnableLeaderElectionRole(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-arc"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: map[string]string{
|
||||
"replicaCount": "2",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/leader_election_role.yaml"})
|
||||
|
||||
var leaderRole rbacv1.Role
|
||||
helm.UnmarshalK8SYaml(t, output, &leaderRole)
|
||||
|
||||
assert.Equal(t, "test-arc-actions-runner-controller-2-leader-election-role", leaderRole.Name)
|
||||
assert.Equal(t, namespaceName, leaderRole.Namespace)
|
||||
}
|
||||
|
||||
func TestTemplate_EnableLeaderElectionRoleBinding(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-arc"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: map[string]string{
|
||||
"replicaCount": "2",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/leader_election_role_binding.yaml"})
|
||||
|
||||
var leaderRoleBinding rbacv1.RoleBinding
|
||||
helm.UnmarshalK8SYaml(t, output, &leaderRoleBinding)
|
||||
|
||||
assert.Equal(t, "test-arc-actions-runner-controller-2-leader-election-rolebinding", leaderRoleBinding.Name)
|
||||
assert.Equal(t, namespaceName, leaderRoleBinding.Namespace)
|
||||
assert.Equal(t, "test-arc-actions-runner-controller-2-leader-election-role", leaderRoleBinding.RoleRef.Name)
|
||||
assert.Equal(t, "test-arc-actions-runner-controller-2", leaderRoleBinding.Subjects[0].Name)
|
||||
}
|
||||
|
||||
func TestTemplate_EnableLeaderElection(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-arc"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: map[string]string{
|
||||
"replicaCount": "2",
|
||||
"image.tag": "dev",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
|
||||
|
||||
var deployment appsv1.Deployment
|
||||
helm.UnmarshalK8SYaml(t, output, &deployment)
|
||||
|
||||
assert.Equal(t, namespaceName, deployment.Namespace)
|
||||
assert.Equal(t, "test-arc-actions-runner-controller-2", deployment.Name)
|
||||
|
||||
assert.Equal(t, int32(2), *deployment.Spec.Replicas)
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers, 1)
|
||||
assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name)
|
||||
assert.Equal(t, "ghcr.io/actions/actions-runner-controller-2:dev", deployment.Spec.Template.Spec.Containers[0].Image)
|
||||
assert.Equal(t, corev1.PullIfNotPresent, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy)
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
|
||||
assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 4)
|
||||
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
|
||||
assert.Equal(t, "--enable-leader-election", deployment.Spec.Template.Spec.Containers[0].Args[1])
|
||||
assert.Equal(t, "--leader-election-id=test-arc-actions-runner-controller-2", deployment.Spec.Template.Spec.Containers[0].Args[2])
|
||||
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[3])
|
||||
}
|
||||
|
||||
func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-arc"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: map[string]string{
|
||||
"imagePullSecrets[0].name": "dockerhub",
|
||||
"imagePullSecrets[1].name": "ghcr",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
|
||||
|
||||
var deployment appsv1.Deployment
|
||||
helm.UnmarshalK8SYaml(t, output, &deployment)
|
||||
|
||||
assert.Equal(t, namespaceName, deployment.Namespace)
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3)
|
||||
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
|
||||
assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub,ghcr", deployment.Spec.Template.Spec.Containers[0].Args[1])
|
||||
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2])
|
||||
}
|
||||
70
charts/actions-runner-controller-2/values.yaml
Normal file
@@ -0,0 +1,70 @@
# Default values for actions-runner-controller-2.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
labels: {}

# leaderElection will be enabled when replicaCount>1,
# so only one replica will be in charge of reconciliation at a given time.
# leaderElectionId will be set to {{ define actions-runner-controller-2.fullname }}.
replicaCount: 1

image:
  repository: "ghcr.io/actions/actions-runner-controller-2"
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # Specifies whether a service account should be created for running the controller pod
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  # You can not use the default service account for this.
  name: ""

podAnnotations: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

# Leverage a PriorityClass to ensure your pods survive resource shortages
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
# PriorityClass: system-cluster-critical
priorityClassName: ""

flags:
  # Log level can be set here with one of the following values: "debug", "info", "warn", "error".
  # Defaults to "debug".
  logLevel: "debug"
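For reference, a minimal override file for the values above might look like the following; the file name and the values shown are illustrative, and replicaCount greater than 1 is what switches the deployment template into leader-election mode:

# my-values.yaml (illustrative overrides, passed with: helm install ... -f my-values.yaml)
replicaCount: 2            # >1 adds --enable-leader-election and --leader-election-id
image:
  tag: "dev"               # overrides the chart appVersion default
flags:
  logLevel: "info"         # one of "debug", "info", "warn", "error"
serviceAccount:
  create: true
  name: "arc-controller"   # must not be "default"; generated from the fullname template if empty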
@@ -15,15 +15,15 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.20.1
+version: 0.22.0

 # Used as the default manager tag value when no tag property is provided in the values.yaml
-appVersion: 0.25.1
+appVersion: 0.27.0

-home: https://github.com/actions-runner-controller/actions-runner-controller
+home: https://github.com/actions/actions-runner-controller

 sources:
-- https://github.com/actions-runner-controller/actions-runner-controller
+- https://github.com/actions/actions-runner-controller

 maintainers:
 - name: actions-runner-controller
@@ -4,108 +4,148 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
|
||||
|
||||
## Values
|
||||
|
||||
**_The values are documented as of HEAD, to review the configuration options for your chart version ensure you view this file at the relevant [tag](https://github.com/actions-runner-controller/actions-runner-controller/tags)_**
|
||||
**_The values are documented as of HEAD, to review the configuration options for your chart version ensure you view this file at the relevant [tag](https://github.com/actions/actions-runner-controller/tags)_**
|
||||
|
||||
> _Default values are the defaults set in the charts `values.yaml`, some properties have default configurations in the code for when the property is omitted or invalid_
|
||||
|
||||
| Key | Description | Default |
|
||||
|----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------|
|
||||
| `labels` | Set labels to apply to all resources in the chart | |
|
||||
| `replicaCount` | Set the number of controller pods | 1 |
|
||||
| `webhookPort` | Set the containerPort for the webhook Pod | 9443 |
|
||||
| `syncPeriod` | Set the period in which the controler reconciles the desired runners count | 10m |
|
||||
| `enableLeaderElection` | Enable election configuration | true |
|
||||
| `leaderElectionId` | Set the election ID for the controller group | |
|
||||
| `githubEnterpriseServerURL` | Set the URL for a self-hosted GitHub Enterprise Server | |
|
||||
| `githubURL` | Override GitHub URL to be used for GitHub API calls | |
|
||||
| `githubUploadURL` | Override GitHub Upload URL to be used for GitHub API calls | |
|
||||
| `runnerGithubURL` | Override GitHub URL to be used by runners during registration | |
|
||||
| `logLevel` | Set the log level of the controller container | |
|
||||
| `additionalVolumes` | Set additional volumes to add to the manager container | |
|
||||
| `additionalVolumeMounts` | Set additional volume mounts to add to the manager container | |
|
||||
| `authSecret.create` | Deploy the controller auth secret | false |
|
||||
| `authSecret.name` | Set the name of the auth secret | controller-manager |
|
||||
| `authSecret.annotations` | Set annotations for the auth Secret | |
|
||||
| `authSecret.github_app_id` | The ID of your GitHub App. **This can't be set at the same time as `authSecret.github_token`** | |
|
||||
| `authSecret.github_app_installation_id` | The ID of your GitHub App installation. **This can't be set at the same time as `authSecret.github_token`** | |
|
||||
| `authSecret.github_app_private_key` | The multiline string of your GitHub App's private key. **This can't be set at the same time as `authSecret.github_token`** | |
|
||||
| `authSecret.github_token` | Your chosen GitHub PAT token. **This can't be set at the same time as the `authSecret.github_app_*`** | |
|
||||
| `authSecret.github_basicauth_username` | Username for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | |
|
||||
| `authSecret.github_basicauth_password` | Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | |
|
||||
| `dockerRegistryMirror` | The default Docker Registry Mirror used by runners. | |
|
||||
| `hostNetwork` | The "hostNetwork" of the controller container | false |
|
||||
| `image.repository` | The "repository/image" of the controller container | summerwind/actions-runner-controller |
|
||||
| `image.tag` | The tag of the controller container | |
|
||||
| `image.actionsRunnerRepositoryAndTag` | The "repository/image" of the actions runner container | summerwind/actions-runner:latest |
|
||||
| `image.actionsRunnerImagePullSecrets` | Optional image pull secrets to be included in the runner pod's ImagePullSecrets | |
|
||||
| `image.dindSidecarRepositoryAndTag` | The "repository/image" of the dind sidecar container | docker:dind |
|
||||
| `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
|
||||
| `metrics.serviceMonitor` | Deploy serviceMonitor kind for use with prometheus-operator CRDs | false |
|
||||
| `metrics.serviceAnnotations` | Set annotations for the provisioned metrics service resource | |
|
||||
| `metrics.port` | Set port of metrics service | 8443 |
|
||||
| `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
|
||||
| `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
|
||||
| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
|
||||
| `metrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
|
||||
| `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | |
|
||||
| `fullnameOverride` | Override the full resource names | |
|
||||
| `nameOverride` | Override the resource name prefix | |
|
||||
| `serviceAccount.annotations` | Set annotations to the service account | |
|
||||
| `serviceAccount.create` | Deploy the controller pod under a service account | true |
|
||||
| `podAnnotations` | Set annotations for the controller pod | |
|
||||
| `podLabels` | Set labels for the controller pod | |
|
||||
| `serviceAccount.name` | Set the name of the service account | |
|
||||
| `securityContext` | Set the security context for each container in the controller pod | |
|
||||
| `podSecurityContext` | Set the security context to controller pod | |
|
||||
| `service.annotations` | Set annotations for the provisioned webhook service resource | |
|
||||
| `service.port` | Set controller service ports | |
|
||||
| `service.type` | Set controller service type | |
|
||||
| `topologySpreadConstraints` | Set the controller pod topologySpreadConstraints | |
|
||||
| `nodeSelector` | Set the controller pod nodeSelector | |
|
||||
| `resources` | Set the controller pod resources | |
|
||||
| `affinity` | Set the controller pod affinity rules | |
|
||||
| `podDisruptionBudget.enabled` | Enables a PDB to ensure HA of controller pods | false |
|
||||
| `podDisruptionBudget.minAvailable` | Minimum number of pods that must be available after eviction | |
|
||||
| `podDisruptionBudget.maxUnavailable` | Maximum number of pods that can be unavailable after eviction. Kubernetes 1.7+ required. | |
|
||||
| `tolerations` | Set the controller pod tolerations | |
|
||||
| `env` | Set environment variables for the controller container | |
|
||||
| `priorityClassName` | Set the controller pod priorityClassName | |
|
||||
| `scope.watchNamespace` | Tells the controller and the github webhook server which namespace to watch if `scope.singleNamespace` is true | `Release.Namespace` (the default namespace of the helm chart). |
|
||||
| `scope.singleNamespace` | Limit the controller to watch a single namespace | false |
|
||||
| `certManagerEnabled` | Enable cert-manager. If disabled you must set admissionWebHooks.caBundle and create TLS secrets manually | true |
|
||||
| `admissionWebHooks.caBundle` | Base64-encoded PEM bundle containing the CA that signed the webhook's serving certificate | |
|
||||
| `githubWebhookServer.logLevel` | Set the log level of the githubWebhookServer container | |
|
||||
| `githubWebhookServer.replicaCount` | Set the number of webhook server pods | 1 |
|
||||
| `githubWebhookServer.useRunnerGroupsVisibility` | Enable supporting runner groups with custom visibility. This will incur in extra API calls and may blow up your budget. Currently, you also need to set `githubWebhookServer.secret.enabled` to enable this feature. | false |
|
||||
| `githubWebhookServer.syncPeriod` | Set the period in which the controller reconciles the resources | 10m |
|
||||
| `githubWebhookServer.enabled` | Deploy the webhook server pod | false |
|
||||
| `githubWebhookServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false |
|
||||
| `githubWebhookServer.secret.create` | Deploy the webhook hook secret | false |
|
||||
| `githubWebhookServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
|
||||
| `githubWebhookServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
|
||||
| `githubWebhookServer.imagePullSecrets` | Specifies the secret to be used when pulling the githubWebhookServer pod containers | |
|
||||
| `githubWebhookServer.nameOverride` | Override the resource name prefix | |
|
||||
| `githubWebhookServer.fullnameOverride` | Override the full resource names | |
|
||||
| `githubWebhookServer.serviceAccount.create` | Deploy the githubWebhookServer under a service account | true |
|
||||
| `githubWebhookServer.serviceAccount.annotations` | Set annotations for the service account | |
|
||||
| `githubWebhookServer.serviceAccount.name` | Set the service account name | |
|
||||
| `githubWebhookServer.podAnnotations` | Set annotations for the githubWebhookServer pod | |
|
||||
| `githubWebhookServer.podLabels` | Set labels for the githubWebhookServer pod | |
|
||||
| `githubWebhookServer.podSecurityContext` | Set the security context to githubWebhookServer pod | |
|
||||
| `githubWebhookServer.securityContext` | Set the security context for each container in the githubWebhookServer pod | |
|
||||
| `githubWebhookServer.resources` | Set the githubWebhookServer pod resources | |
|
||||
| `githubWebhookServer.topologySpreadConstraints` | Set the githubWebhookServer pod topologySpreadConstraints | |
|
||||
| `githubWebhookServer.nodeSelector` | Set the githubWebhookServer pod nodeSelector | |
|
||||
| `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
|
||||
| `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
|
||||
| `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
|
||||
| `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
|
||||
| `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
|
||||
| `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
|
||||
| `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
|
||||
| `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
|
||||
| `githubWebhookServer.ingress.tls` | Set tls configuration for ingress | |
|
||||
| `githubWebhookServer.ingress.ingressClassName` | Set ingress class name | |
|
||||
| `githubWebhookServer.podDisruptionBudget.enabled` | Enables a PDB to ensure HA of githubwebhook pods | false |
|
||||
| `githubWebhookServer.podDisruptionBudget.minAvailable` | Minimum number of pods that must be available after eviction | |
|
||||
| `githubWebhookServer.podDisruptionBudget.maxUnavailable` | Maximum number of pods that can be unavailable after eviction. Kubernetes 1.7+ required. | |
|
||||
| Key | Description | Default |
|
||||
|----------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------|
|
||||
| `labels` | Set labels to apply to all resources in the chart | |
|
||||
| `replicaCount` | Set the number of controller pods | 1 |
|
||||
| `webhookPort` | Set the containerPort for the webhook Pod | 9443 |
|
||||
| `syncPeriod` | Set the period in which the controller reconciles the desired runners count | 1m |
|
||||
| `enableLeaderElection` | Enable election configuration | true |
|
||||
| `leaderElectionId` | Set the election ID for the controller group | |
|
||||
| `githubEnterpriseServerURL` | Set the URL for a self-hosted GitHub Enterprise Server | |
|
||||
| `githubURL` | Override GitHub URL to be used for GitHub API calls | |
|
||||
| `githubUploadURL` | Override GitHub Upload URL to be used for GitHub API calls | |
|
||||
| `runnerGithubURL` | Override GitHub URL to be used by runners during registration | |
|
||||
| `logLevel` | Set the log level of the controller container | |
|
||||
| `logFormat` | Set the log format of the controller. Valid options are "text" and "json" | text |
|
||||
| `additionalVolumes` | Set additional volumes to add to the manager container | |
|
||||
| `additionalVolumeMounts` | Set additional volume mounts to add to the manager container | |
|
||||
| `authSecret.create` | Deploy the controller auth secret | false |
|
||||
| `authSecret.name` | Set the name of the auth secret | controller-manager |
|
||||
| `authSecret.annotations` | Set annotations for the auth Secret | |
|
||||
| `authSecret.github_app_id` | The ID of your GitHub App. **This can't be set at the same time as `authSecret.github_token`** | |
|
||||
| `authSecret.github_app_installation_id` | The ID of your GitHub App installation. **This can't be set at the same time as `authSecret.github_token`** | |
|
||||
| `authSecret.github_app_private_key` | The multiline string of your GitHub App's private key. **This can't be set at the same time as `authSecret.github_token`** | |
|
||||
| `authSecret.github_token` | Your chosen GitHub PAT token. **This can't be set at the same time as the `authSecret.github_app_*`** | |
|
||||
| `authSecret.github_basicauth_username` | Username for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | |
|
||||
| `authSecret.github_basicauth_password` | Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | |
|
||||
| `dockerRegistryMirror` | The default Docker Registry Mirror used by runners. | |
|
||||
| `hostNetwork` | The "hostNetwork" of the controller container | false |
|
||||
| `image.repository` | The "repository/image" of the controller container | summerwind/actions-runner-controller |
|
||||
| `image.tag` | The tag of the controller container | |
|
||||
| `image.actionsRunnerRepositoryAndTag` | The "repository/image" of the actions runner container | summerwind/actions-runner:latest |
|
||||
| `image.actionsRunnerImagePullSecrets` | Optional image pull secrets to be included in the runner pod's ImagePullSecrets | |
|
||||
| `image.dindSidecarRepositoryAndTag` | The "repository/image" of the dind sidecar container | docker:dind |
|
||||
| `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
|
||||
| `metrics.serviceMonitor` | Deploy serviceMonitor kind for use with prometheus-operator CRDs | false |
|
||||
| `metrics.serviceAnnotations` | Set annotations for the provisioned metrics service resource | |
|
||||
| `metrics.port` | Set port of metrics service | 8443 |
|
||||
| `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
|
||||
| `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
|
||||
| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
|
||||
| `metrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
|
||||
| `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | |
|
||||
| `fullnameOverride` | Override the full resource names | |
|
||||
| `nameOverride` | Override the resource name prefix | |
|
||||
| `serviceAccount.annotations` | Set annotations to the service account | |
|
||||
| `serviceAccount.create` | Deploy the controller pod under a service account | true |
|
||||
| `podAnnotations` | Set annotations for the controller pod | |
|
||||
| `podLabels` | Set labels for the controller pod | |
|
||||
| `serviceAccount.name` | Set the name of the service account | |
|
||||
| `securityContext` | Set the security context for each container in the controller pod | |
|
||||
| `podSecurityContext` | Set the security context to controller pod | |
|
||||
| `service.annotations` | Set annotations for the provisioned webhook service resource | |
|
||||
| `service.port` | Set controller service ports | |
|
||||
| `service.type` | Set controller service type | |
|
||||
| `topologySpreadConstraints` | Set the controller pod topologySpreadConstraints | |
|
||||
| `nodeSelector` | Set the controller pod nodeSelector | |
|
||||
| `resources` | Set the controller pod resources | |
|
||||
| `affinity` | Set the controller pod affinity rules | |
|
||||
| `podDisruptionBudget.enabled` | Enables a PDB to ensure HA of controller pods | false |
|
||||
| `podDisruptionBudget.minAvailable` | Minimum number of pods that must be available after eviction | |
|
||||
| `podDisruptionBudget.maxUnavailable` | Maximum number of pods that can be unavailable after eviction. Kubernetes 1.7+ required. | |
|
||||
| `tolerations` | Set the controller pod tolerations | |
|
||||
| `env` | Set environment variables for the controller container | |
|
||||
| `priorityClassName` | Set the controller pod priorityClassName | |
|
||||
| `scope.watchNamespace` | Tells the controller and the github webhook server which namespace to watch if `scope.singleNamespace` is true | `Release.Namespace` (the default namespace of the helm chart). |
|
||||
| `scope.singleNamespace` | Limit the controller to watch a single namespace | false |
|
||||
| `certManagerEnabled` | Enable cert-manager. If disabled you must set admissionWebHooks.caBundle and create TLS secrets manually | true |
|
||||
| `runner.statusUpdateHook.enabled` | Use custom RBAC for runners (role, role binding and service account), this will enable reporting runner statuses | false |
|
||||
| `admissionWebHooks.caBundle` | Base64-encoded PEM bundle containing the CA that signed the webhook's serving certificate | |
|
||||
| `githubWebhookServer.logLevel` | Set the log level of the githubWebhookServer container | |
|
||||
| `githubWebhookServer.logFormat` | Set the log format of the githubWebhookServer controller. Valid options are "text" and "json" | text |
|
||||
| `githubWebhookServer.replicaCount` | Set the number of webhook server pods | 1 |
|
||||
| `githubWebhookServer.useRunnerGroupsVisibility` | Enable supporting runner groups with custom visibility, you also need to set `githubWebhookServer.secret.enabled` to enable this feature. | false |
|
||||
| `githubWebhookServer.enabled` | Deploy the webhook server pod | false |
|
||||
| `githubWebhookServer.queueLimit` | Set the queue size limit in the githubWebhookServer | |
|
||||
| `githubWebhookServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false |
|
||||
| `githubWebhookServer.secret.create` | Deploy the webhook hook secret | false |
|
||||
| `githubWebhookServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
|
||||
| `githubWebhookServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
|
||||
| `githubWebhookServer.imagePullSecrets` | Specifies the secret to be used when pulling the githubWebhookServer pod containers | |
|
||||
| `githubWebhookServer.nameOverride` | Override the resource name prefix | |
|
||||
| `githubWebhookServer.fullnameOverride` | Override the full resource names | |
|
||||
| `githubWebhookServer.serviceAccount.create` | Deploy the githubWebhookServer under a service account | true |
|
||||
| `githubWebhookServer.serviceAccount.annotations` | Set annotations for the service account | |
|
||||
| `githubWebhookServer.serviceAccount.name` | Set the service account name | |
|
||||
| `githubWebhookServer.podAnnotations` | Set annotations for the githubWebhookServer pod | |
|
||||
| `githubWebhookServer.podLabels` | Set labels for the githubWebhookServer pod | |
|
||||
| `githubWebhookServer.podSecurityContext` | Set the security context to githubWebhookServer pod | |
|
||||
| `githubWebhookServer.securityContext` | Set the security context for each container in the githubWebhookServer pod | |
|
||||
| `githubWebhookServer.resources` | Set the githubWebhookServer pod resources | |
|
||||
| `githubWebhookServer.topologySpreadConstraints` | Set the githubWebhookServer pod topologySpreadConstraints | |
|
||||
| `githubWebhookServer.nodeSelector` | Set the githubWebhookServer pod nodeSelector | |
|
||||
| `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
|
||||
| `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
|
||||
| `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
|
||||
| `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
|
||||
| `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
|
||||
| `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
|
||||
| `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
|
||||
| `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
|
||||
| `githubWebhookServer.ingress.tls` | Set tls configuration for ingress | |
|
||||
| `githubWebhookServer.ingress.ingressClassName` | Set ingress class name | |
|
||||
| `githubWebhookServer.podDisruptionBudget.enabled` | Enables a PDB to ensure HA of githubwebhook pods | false |
|
||||
| `githubWebhookServer.podDisruptionBudget.minAvailable` | Minimum number of pods that must be available after eviction | |
|
||||
| `githubWebhookServer.podDisruptionBudget.maxUnavailable` | Maximum number of pods that can be unavailable after eviction. Kubernetes 1.7+ required. | |
|
||||
| `actionsMetricsServer.logLevel` | Set the log level of the actionsMetricsServer container | |
|
||||
| `actionsMetricsServer.logFormat` | Set the log format of the actionsMetricsServer controller. Valid options are "text" and "json" | text |
|
||||
| `actionsMetricsServer.enabled` | Deploy the actions metrics server pod | false |
|
||||
| `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false |
|
||||
| `actionsMetricsServer.secret.create` | Deploy the webhook hook secret | false |
|
||||
| `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
|
||||
| `actionsMetricsServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
|
||||
| `actionsMetricsServer.imagePullSecrets` | Specifies the secret to be used when pulling the actionsMetricsServer pod containers | |
|
||||
| `actionsMetricsServer.nameOverride` | Override the resource name prefix | |
|
||||
| `actionsMetricsServer.fullnameOverride` | Override the full resource names | |
|
||||
| `actionsMetricsServer.serviceAccount.create` | Deploy the actionsMetricsServer under a service account | true |
|
||||
| `actionsMetricsServer.serviceAccount.annotations` | Set annotations for the service account | |
|
||||
| `actionsMetricsServer.serviceAccount.name` | Set the service account name | |
|
||||
| `actionsMetricsServer.podAnnotations` | Set annotations for the actionsMetricsServer pod | |
|
||||
| `actionsMetricsServer.podLabels` | Set labels for the actionsMetricsServer pod | |
|
||||
| `actionsMetricsServer.podSecurityContext` | Set the security context to actionsMetricsServer pod | |
|
||||
| `actionsMetricsServer.securityContext` | Set the security context for each container in the actionsMetricsServer pod | |
|
||||
| `actionsMetricsServer.resources` | Set the actionsMetricsServer pod resources | |
|
||||
| `actionsMetricsServer.topologySpreadConstraints` | Set the actionsMetricsServer pod topologySpreadConstraints | |
|
||||
| `actionsMetricsServer.nodeSelector` | Set the actionsMetricsServer pod nodeSelector | |
|
||||
| `actionsMetricsServer.tolerations` | Set the actionsMetricsServer pod tolerations | |
|
||||
| `actionsMetricsServer.affinity` | Set the actionsMetricsServer pod affinity rules | |
|
||||
| `actionsMetricsServer.priorityClassName` | Set the actionsMetricsServer pod priorityClassName | |
|
||||
| `actionsMetricsServer.service.type` | Set actionsMetricsServer service type | |
|
||||
| `actionsMetricsServer.service.ports` | Set actionsMetricsServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
|
||||
| `actionsMetricsServer.ingress.enabled` | Deploy an ingress kind for the actionsMetricsServer | false |
|
||||
| `actionsMetricsServer.ingress.annotations` | Set annotations for the ingress kind | |
|
||||
| `actionsMetricsServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
|
||||
| `actionsMetricsServer.ingress.tls` | Set tls configuration for ingress | |
|
||||
| `actionsMetricsServer.ingress.ingressClassName` | Set ingress class name | |
|
||||
| `actionsMetrics.serviceMonitor` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false |
|
||||
| `actionsMetrics.serviceAnnotations` | Set annotations for the provisioned actions metrics service resource | |
|
||||
| `actionsMetrics.port` | Set port of actions metrics service | 8443 |
|
||||
| `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
|
||||
| `actionsMetrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
|
||||
| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
|
||||
| `actionsMetrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
|
||||
|
||||
@@ -61,6 +61,16 @@ spec:
|
||||
type: integer
|
||||
type: object
|
||||
type: array
|
||||
githubAPICredentialsFrom:
|
||||
properties:
|
||||
secretRef:
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: object
|
||||
maxReplicas:
|
||||
description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
|
||||
type: integer
|
||||
@@ -92,7 +102,7 @@ spec:
|
||||
description: ScaleUpThreshold is the percentage of busy runners greater than which will trigger the hpa to scale runners up.
|
||||
type: string
|
||||
type:
|
||||
description: Type is the type of metric to be used for autoscaling. The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
|
||||
description: Type is the type of metric to be used for autoscaling. It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
@@ -170,7 +180,7 @@ spec:
|
||||
scheduledOverrides:
|
||||
description: ScheduledOverrides is the list of ScheduledOverride. It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. The earlier a scheduled override is, the higher it is prioritized.
|
||||
items:
|
||||
description: ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. A schedule can optionally be recurring, so that the correspoding override happens every day, week, month, or year.
|
||||
description: ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
|
||||
properties:
|
||||
endTime:
|
||||
description: EndTime is the time at which the first override ends.
|
||||
|
||||
@@ -49,7 +49,7 @@ spec:
|
||||
description: RunnerDeploymentSpec defines the desired state of RunnerDeployment
|
||||
properties:
|
||||
effectiveTime:
|
||||
description: EffectiveTime is the time the upstream controller requested to sync Replicas. It is usually populated by the webhook-based autoscaler via HRA. The value is inherited to RunnerRepicaSet(s) and used to prevent ephemeral runners from unnecessarily recreated.
|
||||
description: EffectiveTime is the time the upstream controller requested to sync Replicas. It is usually populated by the webhook-based autoscaler via HRA. The value is inherited to RunnerReplicaSet(s) and used to prevent ephemeral runners from unnecessarily recreated.
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
@@ -946,7 +946,7 @@ spec:
|
||||
description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
|
||||
type: string
|
||||
ports:
|
||||
description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
|
||||
description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
|
||||
items:
|
||||
description: ContainerPort represents a network port in a single container.
|
||||
properties:
|
||||
@@ -1081,6 +1081,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -1381,6 +1393,9 @@ spec:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
dnsPolicy:
|
||||
description: DNSPolicy defines how a pod's DNS will be configured.
|
||||
type: string
|
||||
dockerEnabled:
|
||||
type: boolean
|
||||
dockerEnv:
|
||||
@@ -1497,6 +1512,18 @@ spec:
|
||||
dockerdContainerResources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -1635,7 +1662,7 @@ spec:
|
||||
type: boolean
|
||||
ephemeralContainers:
|
||||
items:
|
||||
description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. \n This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate."
|
||||
description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted."
|
||||
properties:
|
||||
args:
|
||||
description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
|
||||
@@ -2138,6 +2165,18 @@ spec:
|
||||
resources:
|
||||
description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -2415,6 +2454,16 @@ spec:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
githubAPICredentialsFrom:
|
||||
properties:
|
||||
secretRef:
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: object
|
||||
group:
|
||||
type: string
|
||||
hostAliases:
|
||||
@@ -2815,7 +2864,7 @@ spec:
|
||||
description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
|
||||
type: string
|
||||
ports:
|
||||
description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
|
||||
description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
|
||||
items:
|
||||
description: ContainerPort represents a network port in a single container.
|
||||
properties:
|
||||
@@ -2950,6 +2999,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -3243,6 +3304,18 @@ spec:
|
||||
resources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -3315,7 +3388,7 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
supplementalGroups:
|
||||
description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.
|
||||
description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.
|
||||
items:
|
||||
format: int64
|
||||
type: integer
|
||||
@@ -3725,7 +3798,7 @@ spec:
|
||||
description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
|
||||
type: string
|
||||
ports:
|
||||
description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
|
||||
description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
|
||||
items:
|
||||
description: ContainerPort represents a network port in a single container.
|
||||
properties:
|
||||
@@ -3860,6 +3933,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -4193,16 +4278,28 @@ spec:
|
||||
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
matchLabelKeys:
|
||||
description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
maxSkew:
|
||||
description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
|
||||
format: int32
|
||||
type: integer
|
||||
minDomains:
|
||||
description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate."
|
||||
description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)."
|
||||
format: int32
|
||||
type: integer
|
||||
nodeAffinityPolicy:
|
||||
description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
type: string
|
||||
nodeTaintsPolicy:
|
||||
description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
type: string
|
||||
topologyKey:
|
||||
description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
|
||||
description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
|
||||
type: string
|
||||
whenUnsatisfiable:
|
||||
description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
|
||||
@@ -4529,7 +4626,7 @@ spec:
|
||||
type: string
|
||||
type: array
|
||||
dataSource:
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.'
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -4545,7 +4642,7 @@ spec:
|
||||
- name
|
||||
type: object
|
||||
dataSourceRef:
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.'
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -4556,6 +4653,9 @@ spec:
|
||||
name:
|
||||
description: Name is the name of resource being referenced
|
||||
type: string
|
||||
namespace:
|
||||
description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
|
||||
type: string
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
@@ -4563,6 +4663,18 @@ spec:
|
||||
resources:
|
||||
description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -5191,6 +5303,18 @@ spec:
|
||||
resources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
|
||||
@@ -943,7 +943,7 @@ spec:
|
||||
description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
|
||||
type: string
|
||||
ports:
|
||||
description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
|
||||
description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
|
||||
items:
|
||||
description: ContainerPort represents a network port in a single container.
|
||||
properties:
|
||||
@@ -1078,6 +1078,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -1378,6 +1390,9 @@ spec:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
dnsPolicy:
|
||||
description: DNSPolicy defines how a pod's DNS will be configured.
|
||||
type: string
|
||||
dockerEnabled:
|
||||
type: boolean
|
||||
dockerEnv:
|
||||
@@ -1494,6 +1509,18 @@ spec:
|
||||
dockerdContainerResources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -1632,7 +1659,7 @@ spec:
|
||||
type: boolean
|
||||
ephemeralContainers:
|
||||
items:
|
||||
description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. \n This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate."
|
||||
description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted."
|
||||
properties:
|
||||
args:
|
||||
description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
|
||||
@@ -2135,6 +2162,18 @@ spec:
|
||||
resources:
|
||||
description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -2412,6 +2451,16 @@ spec:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
githubAPICredentialsFrom:
|
||||
properties:
|
||||
secretRef:
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: object
|
||||
group:
|
||||
type: string
|
||||
hostAliases:
|
||||
@@ -2812,7 +2861,7 @@ spec:
|
||||
description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
|
||||
type: string
|
||||
ports:
|
||||
description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
|
||||
description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
|
||||
items:
|
||||
description: ContainerPort represents a network port in a single container.
|
||||
properties:
|
||||
@@ -2947,6 +2996,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -3240,6 +3301,18 @@ spec:
|
||||
resources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -3312,7 +3385,7 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
supplementalGroups:
|
||||
description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.
|
||||
description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.
|
||||
items:
|
||||
format: int64
|
||||
type: integer
|
||||
@@ -3722,7 +3795,7 @@ spec:
|
||||
description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
|
||||
type: string
|
||||
ports:
|
||||
description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
|
||||
description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
|
||||
items:
|
||||
description: ContainerPort represents a network port in a single container.
|
||||
properties:
|
||||
@@ -3857,6 +3930,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -4190,16 +4275,28 @@ spec:
|
||||
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
matchLabelKeys:
|
||||
description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
maxSkew:
|
||||
description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
|
||||
format: int32
|
||||
type: integer
|
||||
minDomains:
|
||||
description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate."
|
||||
description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)."
|
||||
format: int32
|
||||
type: integer
|
||||
nodeAffinityPolicy:
|
||||
description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
type: string
|
||||
nodeTaintsPolicy:
|
||||
description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
type: string
|
||||
topologyKey:
|
||||
description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
|
||||
description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
|
||||
type: string
|
||||
whenUnsatisfiable:
|
||||
description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
|
||||
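The topology-spread fields above (maxSkew, minDomains, nodeAffinityPolicy, nodeTaintsPolicy, topologyKey, whenUnsatisfiable, matchLabelKeys) are easiest to read together. A minimal sketch of how they might be combined in a runner pod template follows; the label key/value and the zone topology key are illustrative assumptions, not values taken from this diff:

    topologySpreadConstraints:
    - maxSkew: 1
      minDomains: 3                            # beta; MinDomainsInPodTopologySpread gate, requires DoNotSchedule
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule
      nodeAffinityPolicy: Honor                # same behavior as leaving the field nil
      nodeTaintsPolicy: Ignore                 # same behavior as leaving the field nil
      labelSelector:
        matchLabels:
          app: example-runner                  # placeholder label
      matchLabelKeys:
      - pod-template-hash                      # placeholder; spreads per rollout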
@@ -4526,7 +4623,7 @@ spec:
|
||||
type: string
|
||||
type: array
|
||||
dataSource:
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.'
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -4542,7 +4639,7 @@ spec:
|
||||
- name
|
||||
type: object
|
||||
dataSourceRef:
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.'
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -4553,6 +4650,9 @@ spec:
|
||||
name:
|
||||
description: Name is the name of resource being referenced
|
||||
type: string
|
||||
namespace:
|
||||
description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
|
||||
type: string
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
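As a concrete reading of the dataSource/dataSourceRef wording above, a volume claim could be pre-populated from a snapshot roughly like this; the snapshot name and namespace are placeholders, and the cross-namespace form additionally needs the CrossNamespaceVolumeDataSource gate plus a ReferenceGrant in the referent namespace:

    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 10Gi
      dataSourceRef:
        apiGroup: snapshot.storage.k8s.io
        kind: VolumeSnapshot
        name: runner-work-snapshot        # placeholder snapshot name
        namespace: snapshots              # optional; omit for a same-namespace source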
@@ -4560,6 +4660,18 @@ spec:
|
||||
resources:
|
||||
description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
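The claims list in the ResourceRequirements schema above only carries names; each name has to resolve to an entry in pod.spec.resourceClaims. A sketch of the container-side half, assuming the DynamicResourceAllocation alpha gate and a pod-level claim named shared-gpu (both the gate being enabled and the claim name are assumptions):

    resources:
      requests:
        cpu: "1"
        memory: 2Gi
      claims:
      - name: shared-gpu                  # must match pod.spec.resourceClaims[].name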
@@ -5188,6 +5300,18 @@ spec:
|
||||
resources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -24,12 +24,24 @@ spec:
|
||||
- jsonPath: .spec.repository
|
||||
name: Repository
|
||||
type: string
|
||||
- jsonPath: .spec.group
|
||||
name: Group
|
||||
type: string
|
||||
- jsonPath: .spec.labels
|
||||
name: Labels
|
||||
type: string
|
||||
- jsonPath: .status.phase
|
||||
name: Status
|
||||
type: string
|
||||
- jsonPath: .status.message
|
||||
name: Message
|
||||
type: string
|
||||
- jsonPath: .status.workflow.repository
|
||||
name: WF Repo
|
||||
type: string
|
||||
- jsonPath: .status.workflow.runID
|
||||
name: WF Run
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
@@ -884,7 +896,7 @@ spec:
|
||||
description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
|
||||
type: string
|
||||
ports:
|
||||
description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
|
||||
description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
|
||||
items:
|
||||
description: ContainerPort represents a network port in a single container.
|
||||
properties:
|
||||
@@ -1019,6 +1031,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -1319,6 +1343,9 @@ spec:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
dnsPolicy:
|
||||
description: DNSPolicy defines how a pod's DNS will be configured.
|
||||
type: string
|
||||
dockerEnabled:
|
||||
type: boolean
|
||||
dockerEnv:
|
||||
@@ -1435,6 +1462,18 @@ spec:
|
||||
dockerdContainerResources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
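dockerdContainerResources takes the same ResourceRequirements shape as any container. A plausible sketch for capping the docker-in-docker sidecar; the numbers are arbitrary examples, not recommendations from this diff:

    dockerdContainerResources:
      requests:
        cpu: 500m
        memory: 512Mi
      limits:
        cpu: "2"
        memory: 2Gi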
@@ -1573,7 +1612,7 @@ spec:
|
||||
type: boolean
|
||||
ephemeralContainers:
|
||||
items:
|
||||
description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. \n This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate."
|
||||
description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted."
|
||||
properties:
|
||||
args:
|
||||
description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
|
||||
@@ -2076,6 +2115,18 @@ spec:
|
||||
resources:
|
||||
description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -2353,6 +2404,16 @@ spec:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
githubAPICredentialsFrom:
|
||||
properties:
|
||||
secretRef:
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: object
|
||||
group:
|
||||
type: string
|
||||
hostAliases:
|
||||
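Per the schema above, githubAPICredentialsFrom only requires a secret name under secretRef. A minimal sketch, with the Secret name being a placeholder:

    githubAPICredentialsFrom:
      secretRef:
        name: github-app-credentials      # placeholder Secret holding GitHub API credentials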
@@ -2753,7 +2814,7 @@ spec:
|
||||
description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
|
||||
type: string
|
||||
ports:
|
||||
description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
|
||||
description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
|
||||
items:
|
||||
description: ContainerPort represents a network port in a single container.
|
||||
properties:
|
||||
@@ -2888,6 +2949,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -3181,6 +3254,18 @@ spec:
|
||||
resources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -3253,7 +3338,7 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
supplementalGroups:
|
||||
description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.
|
||||
description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.
|
||||
items:
|
||||
format: int64
|
||||
type: integer
|
||||
@@ -3663,7 +3748,7 @@ spec:
|
||||
description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
|
||||
type: string
|
||||
ports:
|
||||
description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
|
||||
description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
|
||||
items:
|
||||
description: ContainerPort represents a network port in a single container.
|
||||
properties:
|
||||
@@ -3798,6 +3883,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -4131,16 +4228,28 @@ spec:
|
||||
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
matchLabelKeys:
|
||||
description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
maxSkew:
|
||||
description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
|
||||
format: int32
|
||||
type: integer
|
||||
minDomains:
|
||||
description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate."
|
||||
description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)."
|
||||
format: int32
|
||||
type: integer
|
||||
nodeAffinityPolicy:
|
||||
description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
type: string
|
||||
nodeTaintsPolicy:
|
||||
description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
type: string
|
||||
topologyKey:
|
||||
description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
|
||||
description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
|
||||
type: string
|
||||
whenUnsatisfiable:
|
||||
description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
|
||||
@@ -4467,7 +4576,7 @@ spec:
|
||||
type: string
|
||||
type: array
|
||||
dataSource:
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.'
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -4483,7 +4592,7 @@ spec:
|
||||
- name
|
||||
type: object
|
||||
dataSourceRef:
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.'
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -4494,6 +4603,9 @@ spec:
|
||||
name:
|
||||
description: Name is the name of resource being referenced
|
||||
type: string
|
||||
namespace:
|
||||
description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
|
||||
type: string
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
@@ -4501,6 +4613,18 @@ spec:
|
||||
resources:
|
||||
description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -5129,6 +5253,18 @@ spec:
|
||||
resources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -5194,6 +5330,31 @@ spec:
|
||||
- expiresAt
|
||||
- token
|
||||
type: object
|
||||
workflow:
|
||||
description: WorkflowStatus contains various information that is propagated from GitHub Actions workflow run environment variables to ease monitoring of the workflow run, job, and steps that are triggered on the runner.
|
||||
properties:
|
||||
action:
|
||||
description: Action is the name of the current action or the step ID of the current step that is triggered within the runner. It corresponds to GITHUB_ACTION defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
type: string
|
||||
job:
|
||||
description: Job is the name of the current job that is triggered within the runner. It corresponds to GITHUB_JOB defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
type: string
|
||||
name:
|
||||
description: Name is the name of the workflow that is triggered within the runner. It corresponds to GITHUB_WORKFLOW defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
type: string
|
||||
repository:
|
||||
description: Repository is the owner and repository name of the workflow that is triggered within the runner. It corresponds to GITHUB_REPOSITORY defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
type: string
|
||||
repositoryOwner:
|
||||
description: RepositoryOwner is the repository owner's name for the workflow that is triggered within the runner. It corresponds to GITHUB_REPOSITORY_OWNER defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
type: string
|
||||
runID:
|
||||
description: RunID is the unique number for the current workflow run that is triggered within the runner. It corresponds to GITHUB_RUN_ID defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
type: string
|
||||
runNumber:
|
||||
description: RunNumber is the unique run number for the current workflow run that is triggered within the runner. It corresponds to GITHUB_RUN_NUMBER defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
@@ -67,6 +67,16 @@ spec:
|
||||
type: string
|
||||
ephemeral:
|
||||
type: boolean
|
||||
githubAPICredentialsFrom:
|
||||
properties:
|
||||
secretRef:
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: object
|
||||
group:
|
||||
type: string
|
||||
image:
|
||||
@@ -76,9 +86,17 @@ spec:
|
||||
type: string
|
||||
type: array
|
||||
minReadySeconds:
|
||||
description: Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
|
||||
description: Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)
|
||||
format: int32
|
||||
type: integer
|
||||
ordinals:
|
||||
description: ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a "0" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is alpha.
|
||||
properties:
|
||||
start:
|
||||
description: 'start is the number representing the first replica''s index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range: [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas). If unset, defaults to 0. Replica indices will be in the range: [0, .spec.replicas).'
|
||||
format: int32
|
||||
type: integer
|
||||
type: object
|
||||
organization:
|
||||
pattern: ^[^/]+$
|
||||
type: string
|
||||
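Read together, the minReadySeconds and ordinals fields added above sit alongside the other top-level properties of the spec, mirroring StatefulSet. A sketch with illustrative values; ordinals additionally assumes the StatefulSetStartOrdinal gate is enabled:

    spec:
      minReadySeconds: 10                 # pod must stay ready this long to count as available
      ordinals:
        start: 1                          # replicas named <name>-1, <name>-2, ...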
@@ -142,7 +160,7 @@ spec:
|
||||
description: 'serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where "pod-specific-string" is managed by the StatefulSet controller.'
|
||||
type: string
|
||||
template:
|
||||
description: template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.
|
||||
description: template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format <statefulsetname>-<podindex>. For example, a pod in a StatefulSet named "web" with index number "3" would be named "web-3".
|
||||
properties:
|
||||
metadata:
|
||||
description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata'
|
||||
@@ -1006,7 +1024,7 @@ spec:
|
||||
description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
|
||||
type: string
|
||||
ports:
|
||||
description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
|
||||
description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
|
||||
items:
|
||||
description: ContainerPort represents a network port in a single container.
|
||||
properties:
|
||||
@@ -1141,6 +1159,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -1448,9 +1478,9 @@ spec:
|
||||
description: 'EnableServiceLinks indicates whether information about services should be injected into pod''s environment variables, matching the syntax of Docker links. Optional: Defaults to true.'
|
||||
type: boolean
|
||||
ephemeralContainers:
|
||||
description: List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.
|
||||
description: List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
|
||||
items:
|
||||
description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. \n This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate."
|
||||
description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted."
|
||||
properties:
|
||||
args:
|
||||
description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
|
||||
@@ -1953,6 +1983,18 @@ spec:
|
||||
resources:
|
||||
description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -2254,6 +2296,9 @@ spec:
|
||||
hostPID:
|
||||
description: 'Use the host''s pid namespace. Optional: Default to false.'
|
||||
type: boolean
|
||||
hostUsers:
|
||||
description: 'Use the host''s user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.'
|
||||
type: boolean
|
||||
hostname:
|
||||
description: Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.
|
||||
type: string
|
||||
@@ -2638,7 +2683,7 @@ spec:
|
||||
description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
|
||||
type: string
|
||||
ports:
|
||||
description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
|
||||
description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
|
||||
items:
|
||||
description: ContainerPort represents a network port in a single container.
|
||||
properties:
|
||||
@@ -2773,6 +2818,18 @@ spec:
|
||||
resources:
|
||||
description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -3057,7 +3114,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
os:
|
||||
description: "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. \n If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions \n If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup This is a beta field and requires the IdentifyPodOS feature"
|
||||
description: "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. \n If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions \n If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup"
|
||||
properties:
|
||||
name:
|
||||
description: 'Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null'
|
||||
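As a quick illustration of the `os` field described above (unrecognized values should be treated as `os: null` by clients):

```yaml
spec:
  os:
    name: linux   # when set to linux, the windows-only securityContext fields must stay unset
```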
@@ -3096,6 +3153,31 @@ spec:
|
||||
- conditionType
|
||||
type: object
|
||||
type: array
|
||||
resourceClaims:
|
||||
description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.
|
||||
properties:
|
||||
name:
|
||||
description: Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.
|
||||
type: string
|
||||
source:
|
||||
description: Source describes where to find the ResourceClaim.
|
||||
properties:
|
||||
resourceClaimName:
|
||||
description: ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.
|
||||
type: string
|
||||
resourceClaimTemplateName:
|
||||
description: "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. \n The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The name of the ResourceClaim will be <pod name>-<resource name>, where <resource name> is the PodResourceClaim.Name. Pod validation will reject the pod if the concatenated name is not valid for a ResourceClaim (e.g. too long). \n An existing ResourceClaim with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated resource by mistake. Scheduling and pod startup are then blocked until the unrelated ResourceClaim is removed. \n This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim."
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-map-keys:
|
||||
- name
|
||||
x-kubernetes-list-type: map
|
||||
restartPolicy:
|
||||
description: 'Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy'
|
||||
type: string
|
||||
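A minimal sketch of how pod-level `resourceClaims` and container-level `resources.claims` reference each other by name, assuming the DynamicResourceAllocation feature gate is enabled and a `ResourceClaimTemplate` named `gpu-claim-template` exists (both names are hypothetical):

```yaml
spec:
  resourceClaims:
    - name: gpu                                        # DNS_LABEL, referenced by containers below
      source:
        resourceClaimTemplateName: gpu-claim-template  # hypothetical template in the same namespace
  containers:
    - name: worker                                     # placeholder container
      image: ghcr.io/example/worker:latest             # placeholder image
      resources:
        claims:
          - name: gpu                                  # must match an entry in spec.resourceClaims
```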
@@ -3105,6 +3187,21 @@ spec:
|
||||
schedulerName:
|
||||
description: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.
|
||||
type: string
|
||||
schedulingGates:
|
||||
description: "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. \n This is an alpha-level feature enabled by PodSchedulingReadiness feature gate."
|
||||
items:
|
||||
description: PodSchedulingGate is associated to a Pod to guard its scheduling.
|
||||
properties:
|
||||
name:
|
||||
description: Name of the scheduling gate. Each scheduling gate must have a unique name field.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-map-keys:
|
||||
- name
|
||||
x-kubernetes-list-type: map
|
||||
securityContext:
|
||||
description: 'SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.'
|
||||
properties:
|
||||
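For illustration, a pod held back by a scheduling gate (requires the PodSchedulingReadiness feature gate; the gate name is a placeholder). The pod stays unschedulable until a controller removes the entry:

```yaml
spec:
  schedulingGates:
    - name: example.com/capacity-check   # hypothetical gate; each name must be unique
```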
@@ -3155,7 +3252,7 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
supplementalGroups:
|
||||
description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.
|
||||
description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.
|
||||
items:
|
||||
format: int64
|
||||
type: integer
|
||||
@@ -3270,16 +3367,28 @@ spec:
|
||||
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
matchLabelKeys:
|
||||
description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
maxSkew:
|
||||
description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
|
||||
format: int32
|
||||
type: integer
|
||||
minDomains:
|
||||
description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate."
|
||||
description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)."
|
||||
format: int32
|
||||
type: integer
|
||||
nodeAffinityPolicy:
|
||||
description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
type: string
|
||||
nodeTaintsPolicy:
|
||||
description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
|
||||
type: string
|
||||
topologyKey:
|
||||
description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
|
||||
description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
|
||||
type: string
|
||||
whenUnsatisfiable:
|
||||
description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
|
||||
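Putting the spread fields above together, a minimal sketch that keeps matching pods within one pod of each other per zone (the label key and value are placeholders):

```yaml
spec:
  topologySpreadConstraints:
    - maxSkew: 1                                   # max difference between the fullest and emptiest zone
      topologyKey: topology.kubernetes.io/zone     # one domain per zone
      whenUnsatisfiable: DoNotSchedule             # hard constraint
      labelSelector:
        matchLabels:
          app: runner                              # hypothetical pod label
      nodeAffinityPolicy: Honor                    # only nodes matching affinity/selector are counted
      nodeTaintsPolicy: Ignore                     # taints do not shrink the set of eligible nodes
```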
@@ -3576,7 +3685,7 @@ spec:
|
||||
type: string
|
||||
type: array
|
||||
dataSource:
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.'
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -3592,7 +3701,7 @@ spec:
|
||||
- name
|
||||
type: object
|
||||
dataSourceRef:
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.'
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -3603,6 +3712,9 @@ spec:
|
||||
name:
|
||||
description: Name is the name of resource being referenced
|
||||
type: string
|
||||
namespace:
|
||||
description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
|
||||
type: string
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
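A minimal sketch of a PVC populated from a VolumeSnapshot via `dataSourceRef` (the snapshot name and storage class are placeholders; the AnyVolumeDataSource feature gate must be enabled):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: runner-work-restored            # hypothetical PVC name
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: standard            # placeholder storage class
  resources:
    requests:
      storage: 10Gi
  dataSourceRef:
    apiGroup: snapshot.storage.k8s.io
    kind: VolumeSnapshot
    name: runner-work-snap              # placeholder snapshot in the same namespace
```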
@@ -3610,6 +3722,18 @@ spec:
|
||||
resources:
|
||||
description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -4292,7 +4416,7 @@ spec:
|
||||
type: string
|
||||
type: array
|
||||
dataSource:
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.'
|
||||
description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -4308,7 +4432,7 @@ spec:
|
||||
- name
|
||||
type: object
|
||||
dataSourceRef:
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.'
|
||||
description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.'
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
|
||||
@@ -4319,6 +4443,9 @@ spec:
|
||||
name:
|
||||
description: Name is the name of resource being referenced
|
||||
type: string
|
||||
namespace:
|
||||
description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
|
||||
type: string
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
@@ -4326,6 +4453,18 @@ spec:
|
||||
resources:
|
||||
description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
@@ -4468,6 +4607,18 @@ spec:
|
||||
resources:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
name:
|
||||
description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
limits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
|
||||
@@ -24,11 +24,13 @@ Due to the above you can't just do a `helm upgrade` to release the latest versio
|
||||
# REMEMBER TO UPDATE THE CHART_VERSION TO THE RELEVANT CHART VERSION!!!!
|
||||
CHART_VERSION=0.18.0
|
||||
|
||||
curl -L https://github.com/actions-runner-controller/actions-runner-controller/releases/download/actions-runner-controller-${CHART_VERSION}/actions-runner-controller-${CHART_VERSION}.tgz | tar zxv --strip 1 actions-runner-controller/crds
|
||||
curl -L https://github.com/actions/actions-runner-controller/releases/download/actions-runner-controller-${CHART_VERSION}/actions-runner-controller-${CHART_VERSION}.tgz | tar zxv --strip 1 actions-runner-controller/crds
|
||||
|
||||
kubectl replace -f crds/
|
||||
```
|
||||
|
||||
Note that if you're going to create prometheus-operator `ServiceMonitor` resources via the chart, you need to deploy the prometheus-operator CRDs as well.
|
||||
|
||||
2. Upgrade the Helm release
|
||||
|
||||
```shell
|
||||
|
||||
@@ -0,0 +1,60 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "actions-runner-controller-actions-metrics-server.name" -}}
|
||||
{{- default .Chart.Name .Values.actionsMetricsServer.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "actions-runner-controller-actions-metrics-server.instance" -}}
|
||||
{{- printf "%s-%s" .Release.Name "actions-metrics-server" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "actions-runner-controller-actions-metrics-server.fullname" -}}
|
||||
{{- if .Values.actionsMetricsServer.fullnameOverride }}
|
||||
{{- .Values.actionsMetricsServer.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.actionsMetricsServer.nameOverride }}
|
||||
{{- $instance := include "actions-runner-controller-actions-metrics-server.instance" . }}
|
||||
{{- if contains $name $instance }}
|
||||
{{- $instance | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s-%s" .Release.Name $name "actions-metrics-server" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "actions-runner-controller-actions-metrics-server.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "actions-runner-controller-actions-metrics-server.name" . }}
|
||||
app.kubernetes.io/instance: {{ include "actions-runner-controller-actions-metrics-server.instance" . }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "actions-runner-controller-actions-metrics-server.serviceAccountName" -}}
|
||||
{{- if .Values.actionsMetricsServer.serviceAccount.create }}
|
||||
{{- default (include "actions-runner-controller-actions-metrics-server.fullname" .) .Values.actionsMetricsServer.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.actionsMetricsServer.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "actions-runner-controller-actions-metrics-server.secretName" -}}
|
||||
{{- default (include "actions-runner-controller-actions-metrics-server.fullname" .) .Values.actionsMetricsServer.secret.name }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "actions-runner-controller-actions-metrics-server.roleName" -}}
|
||||
{{- include "actions-runner-controller-actions-metrics-server.fullname" . }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "actions-runner-controller-actions-metrics-server.serviceMonitorName" -}}
|
||||
{{- include "actions-runner-controller-actions-metrics-server.fullname" . | trunc 47 }}-service-monitor
|
||||
{{- end }}
|
||||
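For context, a hypothetical `values.yaml` fragment showing the overrides these helpers consult; with `fullnameOverride` unset, the rendered name falls back to `<release>-<chart>-actions-metrics-server`, truncated to 63 characters:

```yaml
actionsMetricsServer:
  nameOverride: ""        # defaults to the chart name
  fullnameOverride: ""    # set this to pin the resource names exactly
```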
@@ -0,0 +1,162 @@
|
||||
{{- if .Values.actionsMetricsServer.enabled }}
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "actions-runner-controller-actions-metrics-server.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "actions-runner-controller.labels" . | nindent 4 }}
|
||||
spec:
|
||||
replicas: {{ .Values.actionsMetricsServer.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.actionsMetricsServer.podAnnotations }}
|
||||
annotations:
|
||||
kubectl.kubernetes.io/default-logs-container: "actions-metrics-server"
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 8 }}
|
||||
{{- with .Values.actionsMetricsServer.podLabels }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.actionsMetricsServer.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "actions-runner-controller-actions-metrics-server.serviceAccountName" . }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.actionsMetricsServer.podSecurityContext | nindent 8 }}
|
||||
{{- with .Values.actionsMetricsServer.priorityClassName }}
|
||||
priorityClassName: "{{ . }}"
|
||||
{{- end }}
|
||||
containers:
|
||||
- args:
|
||||
{{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
|
||||
{{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
|
||||
- "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
|
||||
{{- if .Values.actionsMetricsServer.logLevel }}
|
||||
- "--log-level={{ .Values.actionsMetricsServer.logLevel }}"
|
||||
{{- end }}
|
||||
{{- if .Values.runnerGithubURL }}
|
||||
- "--runner-github-url={{ .Values.runnerGithubURL }}"
|
||||
{{- end }}
|
||||
{{- if .Values.actionsMetricsServer.logFormat }}
|
||||
- "--log-format={{ .Values.actionsMetricsServer.logFormat }}"
|
||||
{{- end }}
|
||||
command:
|
||||
- "/actions-metrics-server"
|
||||
env:
|
||||
- name: GITHUB_WEBHOOK_SECRET_TOKEN
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: github_webhook_secret_token
|
||||
name: {{ include "actions-runner-controller-actions-metrics-server.secretName" . }}
|
||||
optional: true
|
||||
{{- if .Values.githubEnterpriseServerURL }}
|
||||
- name: GITHUB_ENTERPRISE_URL
|
||||
value: {{ .Values.githubEnterpriseServerURL }}
|
||||
{{- end }}
|
||||
{{- if .Values.githubURL }}
|
||||
- name: GITHUB_URL
|
||||
value: {{ .Values.githubURL }}
|
||||
{{- end }}
|
||||
{{- if .Values.githubUploadURL }}
|
||||
- name: GITHUB_UPLOAD_URL
|
||||
value: {{ .Values.githubUploadURL }}
|
||||
{{- end }}
|
||||
{{- if .Values.actionsMetricsServer.secret.enabled }}
|
||||
- name: GITHUB_TOKEN
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: github_token
|
||||
name: {{ include "actions-runner-controller.githubWebhookServerSecretName" . }}
|
||||
optional: true
|
||||
- name: GITHUB_APP_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: github_app_id
|
||||
name: {{ include "actions-runner-controller.githubWebhookServerSecretName" . }}
|
||||
optional: true
|
||||
- name: GITHUB_APP_INSTALLATION_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: github_app_installation_id
|
||||
name: {{ include "actions-runner-controller.githubWebhookServerSecretName" . }}
|
||||
optional: true
|
||||
- name: GITHUB_APP_PRIVATE_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: github_app_private_key
|
||||
name: {{ include "actions-runner-controller.githubWebhookServerSecretName" . }}
|
||||
optional: true
|
||||
{{- if .Values.authSecret.github_basicauth_username }}
|
||||
- name: GITHUB_BASICAUTH_USERNAME
|
||||
value: {{ .Values.authSecret.github_basicauth_username }}
|
||||
{{- end }}
|
||||
- name: GITHUB_BASICAUTH_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: github_basicauth_password
|
||||
name: {{ include "actions-runner-controller.secretName" . }}
|
||||
optional: true
|
||||
{{- end }}
|
||||
{{- range $key, $val := .Values.actionsMetricsServer.env }}
|
||||
- name: {{ $key }}
|
||||
value: {{ $val | quote }}
|
||||
{{- end }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
|
||||
name: actions-metrics-server
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
name: http
|
||||
protocol: TCP
|
||||
{{- if not .Values.metrics.proxy.enabled }}
|
||||
- containerPort: {{ .Values.metrics.port }}
|
||||
name: metrics-port
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
resources:
|
||||
{{- toYaml .Values.actionsMetricsServer.resources | nindent 12 }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.actionsMetricsServer.securityContext | nindent 12 }}
|
||||
{{- if .Values.metrics.proxy.enabled }}
|
||||
- args:
|
||||
- "--secure-listen-address=0.0.0.0:{{ .Values.metrics.port }}"
|
||||
- "--upstream=http://127.0.0.1:8080/"
|
||||
- "--logtostderr=true"
|
||||
- "--v=10"
|
||||
image: "{{ .Values.metrics.proxy.image.repository }}:{{ .Values.metrics.proxy.image.tag }}"
|
||||
name: kube-rbac-proxy
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- containerPort: {{ .Values.metrics.port }}
|
||||
name: metrics-port
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- end }}
|
||||
terminationGracePeriodSeconds: 10
|
||||
{{- with .Values.actionsMetricsServer.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.actionsMetricsServer.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.actionsMetricsServer.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.actionsMetricsServer.topologySpreadConstraints }}
|
||||
topologySpreadConstraints:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
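A minimal, illustrative `values.yaml` fragment that would enable this Deployment; the field names follow the template above, while the secret name and log settings are placeholders:

```yaml
actionsMetricsServer:
  enabled: true
  replicaCount: 1
  logLevel: info                      # rendered as --log-level=info
  logFormat: text                     # rendered as --log-format=text
  secret:
    enabled: true
    name: actions-metrics-server      # holds github_webhook_secret_token and friends
  serviceAccount:
    create: true
```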
@@ -0,0 +1,47 @@
|
||||
{{- if .Values.actionsMetricsServer.ingress.enabled -}}
|
||||
{{- $fullName := include "actions-runner-controller-actions-metrics-server.fullname" . -}}
|
||||
{{- $svcPort := (index .Values.actionsMetricsServer.service.ports 0).port -}}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ $fullName }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "actions-runner-controller.labels" . | nindent 4 }}
|
||||
{{- with .Values.actionsMetricsServer.ingress.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.actionsMetricsServer.ingress.tls }}
|
||||
tls:
|
||||
{{- range .Values.actionsMetricsServer.ingress.tls }}
|
||||
- hosts:
|
||||
{{- range .hosts }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
secretName: {{ .secretName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.actionsMetricsServer.ingress.ingressClassName }}
|
||||
ingressClassName: {{ . }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- range .Values.actionsMetricsServer.ingress.hosts }}
|
||||
- host: {{ .host | quote }}
|
||||
http:
|
||||
paths:
|
||||
{{- if .extraPaths }}
|
||||
{{- toYaml .extraPaths | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- range .paths }}
|
||||
- path: {{ .path }}
|
||||
pathType: {{ .pathType }}
|
||||
backend:
|
||||
service:
|
||||
name: {{ $fullName }}
|
||||
port:
|
||||
number: {{ $svcPort }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
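For illustration, a hypothetical `values.yaml` fragment matching the Ingress template above; the host, ingress class, and TLS secret are placeholders:

```yaml
actionsMetricsServer:
  ingress:
    enabled: true
    ingressClassName: nginx              # placeholder class
    annotations: {}
    hosts:
      - host: metrics.example.com        # placeholder host
        paths:
          - path: /
            pathType: Prefix
    tls:
      - hosts: [metrics.example.com]
        secretName: metrics-example-tls  # placeholder TLS secret
```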
@@ -0,0 +1,26 @@
|
||||
{{- if .Values.actionsMetricsServer.enabled }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "actions-runner-controller-actions-metrics-server.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "actions-runner-controller.labels" . | nindent 4 }}
|
||||
{{- if .Values.actionsMetricsServer.service.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.actionsMetricsServer.service.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.actionsMetricsServer.service.type }}
|
||||
ports:
|
||||
{{ range $_, $port := .Values.actionsMetricsServer.service.ports -}}
|
||||
- {{ $port | toYaml | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.metrics.serviceMonitor }}
|
||||
- name: metrics-port
|
||||
port: {{ .Values.metrics.port }}
|
||||
targetPort: metrics-port
|
||||
{{- end }}
|
||||
selector:
|
||||
{{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 4 }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,15 @@
|
||||
{{- if .Values.actionsMetricsServer.enabled -}}
|
||||
{{- if .Values.actionsMetricsServer.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "actions-runner-controller-actions-metrics-server.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "actions-runner-controller.labels" . | nindent 4 }}
|
||||
{{- with .Values.actionsMetricsServer.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,25 @@
|
||||
{{- if and .Values.actionsMetricsServer.enabled .Values.actionsMetrics.serviceMonitor }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "actions-runner-controller.labels" . | nindent 4 }}
|
||||
{{- with .Values.actionsMetricsServer.serviceMonitorLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "actions-runner-controller-actions-metrics-server.serviceMonitorName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
endpoints:
|
||||
- path: /metrics
|
||||
port: metrics-port
|
||||
{{- if .Values.actionsMetrics.proxy.enabled }}
|
||||
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
scheme: https
|
||||
tlsConfig:
|
||||
insecureSkipVerify: true
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 6 }}
|
||||
{{- end }}
|
||||
@@ -8,6 +8,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "actions-runner-controller.serviceMonitorName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
endpoints:
|
||||
- path: /metrics
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{{- if .Values.podDisruptionBudget.enabled }}
|
||||
apiVersion: policy/v1beta1
|
||||
apiVersion: policy/v1
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
labels:
|
||||
|
||||
@@ -58,15 +58,18 @@ spec:
|
||||
{{- if .Values.scope.singleNamespace }}
|
||||
- "--watch-namespace={{ default .Release.Namespace .Values.scope.watchNamespace }}"
|
||||
{{- end }}
|
||||
{{- if .Values.githubAPICacheDuration }}
|
||||
- "--github-api-cache-duration={{ .Values.githubAPICacheDuration }}"
|
||||
{{- end }}
|
||||
{{- if .Values.logLevel }}
|
||||
- "--log-level={{ .Values.logLevel }}"
|
||||
{{- end }}
|
||||
{{- if .Values.runnerGithubURL }}
|
||||
- "--runner-github-url={{ .Values.runnerGithubURL }}"
|
||||
{{- end }}
|
||||
{{- if .Values.runner.statusUpdateHook.enabled }}
|
||||
- "--runner-status-update-hook"
|
||||
{{- end }}
|
||||
{{- if .Values.logFormat }}
|
||||
- "--log-format={{ .Values.logFormat }}"
|
||||
{{- end }}
|
||||
command:
|
||||
- "/manager"
|
||||
env:
|
||||
@@ -118,10 +121,14 @@ spec:
|
||||
name: {{ include "actions-runner-controller.secretName" . }}
|
||||
optional: true
|
||||
{{- end }}
|
||||
{{- if kindIs "slice" .Values.env }}
|
||||
{{- toYaml .Values.env | nindent 8 }}
|
||||
{{- else }}
|
||||
{{- range $key, $val := .Values.env }}
|
||||
- name: {{ $key }}
|
||||
value: {{ $val | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
|
||||
name: manager
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
|
||||
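To illustrate the `kindIs "slice"` branch above: `env` can be given either as a list, which is rendered verbatim, or as a map, which is rendered as name/value pairs. Both fragments below are hypothetical:

```yaml
# list form: passed through as-is, so valueFrom and similar constructs work
env:
  - name: http_proxy
    value: "http://proxy.example.com:8080"

# map form: each key becomes an env var with the quoted value
# env:
#   no_proxy: "localhost,127.0.0.1"
```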
@@ -39,7 +39,6 @@ spec:
|
||||
{{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
|
||||
{{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
|
||||
- "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
|
||||
- "--sync-period={{ .Values.githubWebhookServer.syncPeriod }}"
|
||||
{{- if .Values.githubWebhookServer.logLevel }}
|
||||
- "--log-level={{ .Values.githubWebhookServer.logLevel }}"
|
||||
{{- end }}
|
||||
@@ -49,6 +48,12 @@ spec:
|
||||
{{- if .Values.runnerGithubURL }}
|
||||
- "--runner-github-url={{ .Values.runnerGithubURL }}"
|
||||
{{- end }}
|
||||
{{- if .Values.githubWebhookServer.queueLimit }}
|
||||
- "--queue-limit={{ .Values.githubWebhookServer.queueLimit }}"
|
||||
{{- end }}
|
||||
{{- if .Values.githubWebhookServer.logFormat }}
|
||||
- "--log-format={{ .Values.githubWebhookServer.logFormat }}"
|
||||
{{- end }}
|
||||
command:
|
||||
- "/github-webhook-server"
|
||||
env:
|
||||
|
||||
@@ -1,13 +1,7 @@
|
||||
{{- if .Values.githubWebhookServer.ingress.enabled -}}
|
||||
{{- $fullName := include "actions-runner-controller-github-webhook-server.fullname" . -}}
|
||||
{{- $svcPort := (index .Values.githubWebhookServer.service.ports 0).port -}}
|
||||
{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" }}
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
{{- else if .Capabilities.APIVersions.Has "extensions/v1beta1/Ingress" }}
|
||||
apiVersion: extensions/v1beta1
|
||||
{{- end }}
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ $fullName }}
|
||||
@@ -42,19 +36,12 @@ spec:
|
||||
{{- end }}
|
||||
{{- range .paths }}
|
||||
- path: {{ .path }}
|
||||
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }}
|
||||
pathType: {{ .pathType }}
|
||||
{{- end }}
|
||||
backend:
|
||||
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }}
|
||||
service:
|
||||
name: {{ $fullName }}
|
||||
port:
|
||||
number: {{ $svcPort }}
|
||||
{{- else }}
|
||||
serviceName: {{ $fullName }}
|
||||
servicePort: {{ $svcPort }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{{- if .Values.githubWebhookServer.podDisruptionBudget.enabled }}
|
||||
apiVersion: policy/v1beta1
|
||||
apiVersion: policy/v1
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
labels:
|
||||
|
||||