mirror of
https://github.com/actions/actions-runner-controller.git
synced 2025-12-10 11:41:27 +00:00
Compare commits
1 Commits
51023ade49
...
update-run
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5e85311142 |
4
.github/actions/setup-arc-e2e/action.yaml
vendored
4
.github/actions/setup-arc-e2e/action.yaml
vendored
@@ -36,8 +36,8 @@ runs:
|
||||
driver-opts: image=moby/buildkit:v0.10.6
|
||||
|
||||
- name: Build controller image
|
||||
# https://github.com/docker/build-push-action/releases/tag/v6.18.0
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
|
||||
# https://github.com/docker/build-push-action/releases/tag/v6.15.0
|
||||
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
|
||||
with:
|
||||
file: Dockerfile
|
||||
platforms: linux/amd64
|
||||
|
||||
20
.github/workflows/arc-publish-chart.yaml
vendored
20
.github/workflows/arc-publish-chart.yaml
vendored
@@ -40,12 +40,13 @@ jobs:
|
||||
publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
|
||||
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
|
||||
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
|
||||
with:
|
||||
version: ${{ env.HELM_VERSION }}
|
||||
|
||||
@@ -58,12 +59,13 @@ jobs:
|
||||
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem
|
||||
|
||||
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
|
||||
- uses: actions/setup-python@v6
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
|
||||
- name: Set up chart-testing
|
||||
uses: helm/chart-testing-action@6ec842c01de15ebb84c8627d2744a0c2f2755c9f
|
||||
# https://github.com/helm/chart-testing-action/releases/tag/v2.7.0
|
||||
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
|
||||
|
||||
- name: Run chart-testing (list-changed)
|
||||
id: list-changed
|
||||
@@ -79,7 +81,8 @@ jobs:
|
||||
|
||||
- name: Create kind cluster
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
uses: helm/kind-action@92086f6be054225fa813e0a4b13787fc9088faab
|
||||
# https://github.com/helm/kind-action/releases/tag/v1.12.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
|
||||
|
||||
# We need cert-manager already installed in the cluster because we assume the CRDs exist
|
||||
- name: Install cert-manager
|
||||
@@ -134,7 +137,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -145,7 +148,8 @@ jobs:
|
||||
|
||||
- name: Get Token
|
||||
id: get_workflow_token
|
||||
uses: peter-murray/workflow-application-token-action@d17e3a9a36850ea89f35db16c1067dd2b68ee343
|
||||
# https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
|
||||
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
|
||||
with:
|
||||
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
|
||||
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
|
||||
@@ -184,7 +188,7 @@ jobs:
|
||||
# this workaround is intended to move the index.yaml to the target repo
|
||||
# where the github pages are hosted
|
||||
- name: Checkout target repository
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
|
||||
path: ${{ env.CHART_TARGET_REPO }}
|
||||
|
||||
7
.github/workflows/arc-publish.yaml
vendored
7
.github/workflows/arc-publish.yaml
vendored
@@ -39,9 +39,9 @@ jobs:
|
||||
if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-go@v6
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: "go.mod"
|
||||
|
||||
@@ -73,7 +73,8 @@ jobs:
|
||||
|
||||
- name: Get Token
|
||||
id: get_workflow_token
|
||||
uses: peter-murray/workflow-application-token-action@d17e3a9a36850ea89f35db16c1067dd2b68ee343
|
||||
# https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
|
||||
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
|
||||
with:
|
||||
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
|
||||
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
|
||||
|
||||
9
.github/workflows/arc-release-runners.yaml
vendored
9
.github/workflows/arc-release-runners.yaml
vendored
@@ -1,6 +1,4 @@
|
||||
name: Release ARC Runner Images
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
# Revert to https://github.com/actions-runner-controller/releases#releases
|
||||
# for details on why we use this approach
|
||||
@@ -19,7 +17,7 @@ env:
|
||||
PUSH_TO_REGISTRIES: true
|
||||
TARGET_ORG: actions-runner-controller
|
||||
TARGET_WORKFLOW: release-runners.yaml
|
||||
DOCKER_VERSION: 28.0.4
|
||||
DOCKER_VERSION: 24.0.7
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}
|
||||
@@ -30,7 +28,7 @@ jobs:
|
||||
name: Trigger Build and Push of Runner Images
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get runner version
|
||||
id: versions
|
||||
run: |
|
||||
@@ -41,7 +39,8 @@ jobs:
|
||||
|
||||
- name: Get Token
|
||||
id: get_workflow_token
|
||||
uses: peter-murray/workflow-application-token-action@d17e3a9a36850ea89f35db16c1067dd2b68ee343
|
||||
# https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
|
||||
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
|
||||
with:
|
||||
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
|
||||
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
|
||||
|
||||
@@ -1,9 +1,6 @@
|
||||
# This workflows polls releases from actions/runner and in case of a new one it
|
||||
# updates files containing runner version and opens a pull request.
|
||||
name: Runner Updates Check (Scheduled Job)
|
||||
permissions:
|
||||
pull-requests: write
|
||||
contents: write
|
||||
|
||||
on:
|
||||
schedule:
|
||||
@@ -24,7 +21,7 @@ jobs:
|
||||
container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
|
||||
container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Get runner current and latest versions
|
||||
id: runner_versions
|
||||
@@ -53,8 +50,6 @@ jobs:
|
||||
# it sets a PR name as output.
|
||||
check_pr:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
needs: check_versions
|
||||
if: needs.check_versions.outputs.runner_current_version != needs.check_versions.outputs.runner_latest_version || needs.check_versions.outputs.container_hooks_current_version != needs.check_versions.outputs.container_hooks_latest_version
|
||||
outputs:
|
||||
@@ -69,7 +64,7 @@ jobs:
|
||||
echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
|
||||
echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"
|
||||
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: PR Name
|
||||
id: pr_name
|
||||
@@ -124,7 +119,7 @@ jobs:
|
||||
PR_NAME: ${{ needs.check_pr.outputs.pr_name }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: New branch
|
||||
run: git checkout -b update-runner-"$(date +%Y-%m-%d)"
|
||||
|
||||
13
.github/workflows/arc-validate-chart.yaml
vendored
13
.github/workflows/arc-validate-chart.yaml
vendored
@@ -40,22 +40,24 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
|
||||
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
|
||||
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
|
||||
with:
|
||||
version: ${{ env.HELM_VERSION }}
|
||||
|
||||
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
|
||||
- uses: actions/setup-python@v6
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
|
||||
- name: Set up chart-testing
|
||||
uses: helm/chart-testing-action@6ec842c01de15ebb84c8627d2744a0c2f2755c9f
|
||||
# https://github.com/helm/chart-testing-action/releases/tag/v2.7.0
|
||||
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
|
||||
|
||||
- name: Run chart-testing (list-changed)
|
||||
id: list-changed
|
||||
@@ -70,7 +72,8 @@ jobs:
|
||||
ct lint --config charts/.ci/ct-config.yaml
|
||||
|
||||
- name: Create kind cluster
|
||||
uses: helm/kind-action@92086f6be054225fa813e0a4b13787fc9088faab
|
||||
# https://github.com/helm/kind-action/releases/tag/v1.12.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
|
||||
# We need cert-manager already installed in the cluster because we assume the CRDs exist
|
||||
|
||||
4
.github/workflows/arc-validate-runners.yaml
vendored
4
.github/workflows/arc-validate-runners.yaml
vendored
@@ -24,7 +24,7 @@ jobs:
|
||||
name: runner / shellcheck
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v4
|
||||
- name: "Run shellcheck"
|
||||
run: make shellcheck
|
||||
|
||||
@@ -33,7 +33,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Run tests
|
||||
run: |
|
||||
|
||||
22
.github/workflows/gha-e2e-tests.yaml
vendored
22
.github/workflows/gha-e2e-tests.yaml
vendored
@@ -16,7 +16,7 @@ env:
|
||||
TARGET_ORG: actions-runner-controller
|
||||
TARGET_REPO: arc_e2e_test_dummy
|
||||
IMAGE_NAME: "arc-test-image"
|
||||
IMAGE_VERSION: "0.13.0"
|
||||
IMAGE_VERSION: "0.11.0"
|
||||
|
||||
concurrency:
|
||||
# This will make sure we only apply the concurrency limits on pull requests
|
||||
@@ -33,7 +33,7 @@ jobs:
|
||||
env:
|
||||
WORKFLOW_FILE: "arc-test-workflow.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{github.head_ref}}
|
||||
|
||||
@@ -124,7 +124,7 @@ jobs:
|
||||
env:
|
||||
WORKFLOW_FILE: "arc-test-workflow.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{github.head_ref}}
|
||||
|
||||
@@ -217,7 +217,7 @@ jobs:
|
||||
env:
|
||||
WORKFLOW_FILE: arc-test-dind-workflow.yaml
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{github.head_ref}}
|
||||
|
||||
@@ -309,7 +309,7 @@ jobs:
|
||||
env:
|
||||
WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{github.head_ref}}
|
||||
|
||||
@@ -410,7 +410,7 @@ jobs:
|
||||
env:
|
||||
WORKFLOW_FILE: "arc-test-workflow.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{github.head_ref}}
|
||||
|
||||
@@ -513,7 +513,7 @@ jobs:
|
||||
env:
|
||||
WORKFLOW_FILE: "arc-test-workflow.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{github.head_ref}}
|
||||
|
||||
@@ -610,7 +610,7 @@ jobs:
|
||||
env:
|
||||
WORKFLOW_FILE: "arc-test-workflow.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{github.head_ref}}
|
||||
|
||||
@@ -732,7 +732,7 @@ jobs:
|
||||
env:
|
||||
WORKFLOW_FILE: "arc-test-sleepy-matrix.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{github.head_ref}}
|
||||
|
||||
@@ -904,7 +904,7 @@ jobs:
|
||||
env:
|
||||
WORKFLOW_FILE: arc-test-workflow.yaml
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.head_ref }}
|
||||
|
||||
@@ -984,7 +984,7 @@ jobs:
|
||||
echo "5 pods are up!"
|
||||
break
|
||||
fi
|
||||
if [[ "$count" -ge 30 ]]; then
|
||||
if [[ "$count" -ge 12 ]]; then
|
||||
echo "Timeout waiting for 5 pods to be created"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
25
.github/workflows/gha-publish-chart.yaml
vendored
25
.github/workflows/gha-publish-chart.yaml
vendored
@@ -45,7 +45,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
# If inputs.ref is empty, it'll resolve to the default branch
|
||||
ref: ${{ inputs.ref }}
|
||||
@@ -72,10 +72,11 @@ jobs:
|
||||
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130
|
||||
# https://github.com/docker/setup-qemu-action/releases/tag/v3.6.0
|
||||
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435
|
||||
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
|
||||
with:
|
||||
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
|
||||
# BuildKit v0.11 which has a bug causing intermittent
|
||||
@@ -84,14 +85,16 @@ jobs:
|
||||
driver-opts: image=moby/buildkit:v0.10.6
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef
|
||||
# https://github.com/docker/login-action/releases/tag/v3.4.0
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build & push controller image
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
|
||||
# https://github.com/docker/build-push-action/releases/tag/v6.15.0
|
||||
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
|
||||
with:
|
||||
file: Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
@@ -100,6 +103,8 @@ jobs:
|
||||
tags: |
|
||||
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}
|
||||
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}-${{ steps.resolve_parameters.outputs.short_sha }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
||||
- name: Job summary
|
||||
run: |
|
||||
@@ -119,7 +124,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
# If inputs.ref is empty, it'll resolve to the default branch
|
||||
ref: ${{ inputs.ref }}
|
||||
@@ -138,7 +143,8 @@ jobs:
|
||||
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
|
||||
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
|
||||
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
|
||||
with:
|
||||
version: ${{ env.HELM_VERSION }}
|
||||
|
||||
@@ -166,7 +172,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
# If inputs.ref is empty, it'll resolve to the default branch
|
||||
ref: ${{ inputs.ref }}
|
||||
@@ -185,7 +191,8 @@ jobs:
|
||||
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
|
||||
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
|
||||
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
|
||||
with:
|
||||
version: ${{ env.HELM_VERSION }}
|
||||
|
||||
|
||||
22
.github/workflows/gha-validate-chart.yaml
vendored
22
.github/workflows/gha-validate-chart.yaml
vendored
@@ -36,22 +36,24 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
|
||||
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
|
||||
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
|
||||
with:
|
||||
version: ${{ env.HELM_VERSION }}
|
||||
|
||||
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
|
||||
- uses: actions/setup-python@v6
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
|
||||
- name: Set up chart-testing
|
||||
uses: helm/chart-testing-action@6ec842c01de15ebb84c8627d2744a0c2f2755c9f
|
||||
# https://github.com/helm/chart-testing-action/releases/tag/v2.7.0
|
||||
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
|
||||
|
||||
- name: Run chart-testing (list-changed)
|
||||
id: list-changed
|
||||
@@ -67,13 +69,14 @@ jobs:
|
||||
ct lint --config charts/.ci/ct-config-gha.yaml
|
||||
|
||||
- name: Set up docker buildx
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435
|
||||
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
with:
|
||||
version: latest
|
||||
|
||||
- name: Build controller image
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
|
||||
# https://github.com/docker/build-push-action/releases/tag/v6.15.0
|
||||
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
with:
|
||||
file: Dockerfile
|
||||
@@ -88,7 +91,8 @@ jobs:
|
||||
cache-to: type=gha,mode=max
|
||||
|
||||
- name: Create kind cluster
|
||||
uses: helm/kind-action@92086f6be054225fa813e0a4b13787fc9088faab
|
||||
# https://github.com/helm/kind-action/releases/tag/v1.12.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
|
||||
if: steps.list-changed.outputs.changed == 'true'
|
||||
with:
|
||||
cluster_name: chart-testing
|
||||
@@ -111,8 +115,8 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
- uses: actions/setup-go@v6
|
||||
uses: actions/checkout@v4
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: "go.mod"
|
||||
cache: false
|
||||
|
||||
19
.github/workflows/global-publish-canary.yaml
vendored
19
.github/workflows/global-publish-canary.yaml
vendored
@@ -55,11 +55,12 @@ jobs:
|
||||
TARGET_REPO: actions-runner-controller
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Get Token
|
||||
id: get_workflow_token
|
||||
uses: peter-murray/workflow-application-token-action@d17e3a9a36850ea89f35db16c1067dd2b68ee343
|
||||
# https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
|
||||
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
|
||||
with:
|
||||
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
|
||||
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
|
||||
@@ -90,10 +91,11 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef
|
||||
# https://github.com/docker/login-action/releases/tag/v3.4.0
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
@@ -110,16 +112,19 @@ jobs:
|
||||
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130
|
||||
# https://github.com/docker/setup-qemu-action/releases/tag/v3.6.0
|
||||
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435
|
||||
# https://github.com/docker/setup-buildx-action/releases/tag/v3.10.0
|
||||
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
|
||||
with:
|
||||
version: latest
|
||||
|
||||
# Unstable builds - run at your own risk
|
||||
- name: Build and Push
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
|
||||
# https://github.com/docker/build-push-action/releases/tag/v6.15.0
|
||||
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
|
||||
with:
|
||||
context: .
|
||||
file: ./Dockerfile
|
||||
|
||||
12
.github/workflows/global-run-codeql.yaml
vendored
12
.github/workflows/global-run-codeql.yaml
vendored
@@ -25,20 +25,20 @@ jobs:
|
||||
security-events: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v6
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v4
|
||||
uses: github/codeql-action/init@v2
|
||||
with:
|
||||
languages: go, actions
|
||||
languages: go
|
||||
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@v4
|
||||
uses: github/codeql-action/autobuild@v2
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v4
|
||||
uses: github/codeql-action/analyze@v2
|
||||
|
||||
@@ -1,10 +1,5 @@
|
||||
name: First Interaction
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
pull-requests: write
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [opened]
|
||||
@@ -16,11 +11,11 @@ jobs:
|
||||
check_for_first_interaction:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/first-interaction@v3
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/first-interaction@main
|
||||
with:
|
||||
repo_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
issue_message: |
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
issue-message: |
|
||||
Hello! Thank you for filing an issue.
|
||||
|
||||
The maintainers will triage your issue shortly.
|
||||
@@ -28,7 +23,7 @@ jobs:
|
||||
In the meantime, please take a look at the [troubleshooting guide](https://github.com/actions/actions-runner-controller/blob/master/TROUBLESHOOTING.md) for bug reports.
|
||||
|
||||
If this is a feature request, please review our [contribution guidelines](https://github.com/actions/actions-runner-controller/blob/master/CONTRIBUTING.md).
|
||||
pr_message: |
|
||||
pr-message: |
|
||||
Hello! Thank you for your contribution.
|
||||
|
||||
Please review our [contribution guidelines](https://github.com/actions/actions-runner-controller/blob/master/CONTRIBUTING.md) to understand the project's testing and code conventions.
|
||||
|
||||
2
.github/workflows/global-run-stale.yaml
vendored
2
.github/workflows/global-run-stale.yaml
vendored
@@ -14,7 +14,7 @@ jobs:
|
||||
issues: write # for actions/stale to close stale issues
|
||||
pull-requests: write # for actions/stale to close stale PRs
|
||||
steps:
|
||||
- uses: actions/stale@v10
|
||||
- uses: actions/stale@v6
|
||||
with:
|
||||
stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
|
||||
# turn off stale for both issues and PRs
|
||||
|
||||
23
.github/workflows/go.yaml
vendored
23
.github/workflows/go.yaml
vendored
@@ -29,8 +29,8 @@ jobs:
|
||||
fmt:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/setup-go@v6
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: "go.mod"
|
||||
cache: false
|
||||
@@ -42,22 +42,23 @@ jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/setup-go@v6
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: "go.mod"
|
||||
cache: false
|
||||
- name: golangci-lint
|
||||
uses: golangci/golangci-lint-action@0a35821d5c230e903fcfe077583637dea1b27b47
|
||||
# https://github.com/golangci/golangci-lint-action/releases/tag/v7.0.0
|
||||
uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd
|
||||
with:
|
||||
only-new-issues: true
|
||||
version: v2.5.0
|
||||
version: v2.1.2
|
||||
|
||||
generate:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/setup-go@v6
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: "go.mod"
|
||||
cache: false
|
||||
@@ -69,8 +70,8 @@ jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/setup-go@v6
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: "go.mod"
|
||||
- run: make manifests
|
||||
@@ -78,7 +79,7 @@ jobs:
|
||||
run: git diff --exit-code
|
||||
- name: Install kubebuilder
|
||||
run: |
|
||||
curl -D headers.txt -fsL "https://storage.googleapis.com/kubebuilder-tools/kubebuilder-tools-1.30.0-linux-amd64.tar.gz" -o kubebuilder-tools
|
||||
curl -D headers.txt -fsL "https://storage.googleapis.com/kubebuilder-tools/kubebuilder-tools-1.26.1-linux-amd64.tar.gz" -o kubebuilder-tools
|
||||
echo "$(grep -i etag headers.txt -m 1 | cut -d'"' -f2) kubebuilder-tools" > sum
|
||||
md5sum -c sum
|
||||
tar -zvxf kubebuilder-tools
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
# actions-runner-controller maintainers
|
||||
* @mumoshu @toast-gear @actions/actions-launch @actions/actions-compute @nikola-jokic @rentziass
|
||||
* @mumoshu @toast-gear @actions/actions-launch @nikola-jokic @rentziass
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Build the manager binary
|
||||
FROM --platform=$BUILDPLATFORM golang:1.25.1 AS builder
|
||||
FROM --platform=$BUILDPLATFORM golang:1.24.0 as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
|
||||
@@ -30,7 +30,7 @@ ARG TARGETPLATFORM TARGETOS TARGETARCH TARGETVARIANT VERSION=dev COMMIT_SHA=dev
|
||||
# to avoid https://github.com/moby/buildkit/issues/2334
|
||||
# We can use docker layer cache so the build is fast enogh anyway
|
||||
# We also use per-platform GOCACHE for the same reason.
|
||||
ENV GOCACHE="/build/${TARGETPLATFORM}/root/.cache/go-build"
|
||||
ENV GOCACHE /build/${TARGETPLATFORM}/root/.cache/go-build
|
||||
|
||||
# Build
|
||||
RUN --mount=target=. \
|
||||
|
||||
11
Makefile
11
Makefile
@@ -6,7 +6,7 @@ endif
|
||||
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
|
||||
VERSION ?= dev
|
||||
COMMIT_SHA = $(shell git rev-parse HEAD)
|
||||
RUNNER_VERSION ?= 2.329.0
|
||||
RUNNER_VERSION ?= 2.323.0
|
||||
TARGETPLATFORM ?= $(shell arch)
|
||||
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
|
||||
RUNNER_TAG ?= ${VERSION}
|
||||
@@ -68,7 +68,7 @@ endif
|
||||
all: manager
|
||||
|
||||
lint:
|
||||
docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v2.5.0 golangci-lint run
|
||||
docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v2.1.2 golangci-lint run
|
||||
|
||||
GO_TEST_ARGS ?= -short
|
||||
|
||||
@@ -117,6 +117,9 @@ manifests: manifests-gen-crds chart-crds
|
||||
|
||||
manifests-gen-crds: controller-gen yq
|
||||
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
|
||||
for YAMLFILE in config/crd/bases/actions*.yaml; do \
|
||||
$(YQ) '.spec.preserveUnknownFields = false' --inplace "$$YAMLFILE" ; \
|
||||
done
|
||||
make manifests-gen-crds-fix DELETE_KEY=x-kubernetes-list-type
|
||||
make manifests-gen-crds-fix DELETE_KEY=x-kubernetes-list-map-keys
|
||||
|
||||
@@ -307,7 +310,7 @@ github-release: release
|
||||
# Otherwise we get errors like the below:
|
||||
# Error: failed to install CRD crds/actions.summerwind.dev_runnersets.yaml: CustomResourceDefinition.apiextensions.k8s.io "runnersets.actions.summerwind.dev" is invalid: [spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[containers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property, spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[initContainers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property]
|
||||
#
|
||||
# Note that controller-gen newer than 0.8.0 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
|
||||
# Note that controller-gen newer than 0.7.0 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
|
||||
# Otherwise ObjectMeta embedded in Spec results in empty on the storage.
|
||||
controller-gen:
|
||||
ifeq (, $(shell which controller-gen))
|
||||
@@ -317,7 +320,7 @@ ifeq (, $(wildcard $(GOBIN)/controller-gen))
|
||||
CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\
|
||||
cd $$CONTROLLER_GEN_TMP_DIR ;\
|
||||
go mod init tmp ;\
|
||||
go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.19.0 ;\
|
||||
go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.17.2 ;\
|
||||
rm -rf $$CONTROLLER_GEN_TMP_DIR ;\
|
||||
}
|
||||
endif
|
||||
|
||||
@@ -1,89 +0,0 @@
|
||||
package appconfig
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
type AppConfig struct {
|
||||
AppID string `json:"github_app_id"`
|
||||
AppInstallationID int64 `json:"github_app_installation_id"`
|
||||
AppPrivateKey string `json:"github_app_private_key"`
|
||||
|
||||
Token string `json:"github_token"`
|
||||
}
|
||||
|
||||
func (c *AppConfig) tidy() *AppConfig {
|
||||
if len(c.Token) > 0 {
|
||||
return &AppConfig{
|
||||
Token: c.Token,
|
||||
}
|
||||
}
|
||||
|
||||
return &AppConfig{
|
||||
AppID: c.AppID,
|
||||
AppInstallationID: c.AppInstallationID,
|
||||
AppPrivateKey: c.AppPrivateKey,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *AppConfig) Validate() error {
|
||||
if c == nil {
|
||||
return fmt.Errorf("missing app config")
|
||||
}
|
||||
hasToken := len(c.Token) > 0
|
||||
hasGitHubAppAuth := c.hasGitHubAppAuth()
|
||||
if hasToken && hasGitHubAppAuth {
|
||||
return fmt.Errorf("both PAT and GitHub App credentials provided. should only provide one")
|
||||
}
|
||||
if !hasToken && !hasGitHubAppAuth {
|
||||
return fmt.Errorf("no credentials provided: either a PAT or GitHub App credentials should be provided")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *AppConfig) hasGitHubAppAuth() bool {
|
||||
return len(c.AppID) > 0 && c.AppInstallationID > 0 && len(c.AppPrivateKey) > 0
|
||||
}
|
||||
|
||||
func FromSecret(secret *corev1.Secret) (*AppConfig, error) {
|
||||
var appInstallationID int64
|
||||
if v := string(secret.Data["github_app_installation_id"]); v != "" {
|
||||
val, err := strconv.ParseInt(v, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
appInstallationID = val
|
||||
}
|
||||
|
||||
cfg := &AppConfig{
|
||||
Token: string(secret.Data["github_token"]),
|
||||
AppID: string(secret.Data["github_app_id"]),
|
||||
AppInstallationID: appInstallationID,
|
||||
AppPrivateKey: string(secret.Data["github_app_private_key"]),
|
||||
}
|
||||
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("failed to validate config: %v", err)
|
||||
}
|
||||
|
||||
return cfg.tidy(), nil
|
||||
}
|
||||
|
||||
func FromJSONString(v string) (*AppConfig, error) {
|
||||
var appConfig AppConfig
|
||||
if err := json.NewDecoder(bytes.NewBufferString(v)).Decode(&appConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := appConfig.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("failed to validate app config decoded from string: %w", err)
|
||||
}
|
||||
|
||||
return appConfig.tidy(), nil
|
||||
}
|
||||
@@ -1,152 +0,0 @@
|
||||
package appconfig
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func TestAppConfigValidate_invalid(t *testing.T) {
|
||||
tt := map[string]*AppConfig{
|
||||
"empty": {},
|
||||
"token and app config": {
|
||||
AppID: "1",
|
||||
AppInstallationID: 2,
|
||||
AppPrivateKey: "private key",
|
||||
Token: "token",
|
||||
},
|
||||
"app id not set": {
|
||||
AppInstallationID: 2,
|
||||
AppPrivateKey: "private key",
|
||||
},
|
||||
"app installation id not set": {
|
||||
AppID: "2",
|
||||
AppPrivateKey: "private key",
|
||||
},
|
||||
"private key empty": {
|
||||
AppID: "2",
|
||||
AppInstallationID: 1,
|
||||
AppPrivateKey: "",
|
||||
},
|
||||
}
|
||||
|
||||
for name, cfg := range tt {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
err := cfg.Validate()
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppConfigValidate_valid(t *testing.T) {
|
||||
tt := map[string]*AppConfig{
|
||||
"token": {
|
||||
Token: "token",
|
||||
},
|
||||
"app ID": {
|
||||
AppID: "1",
|
||||
AppInstallationID: 2,
|
||||
AppPrivateKey: "private key",
|
||||
},
|
||||
}
|
||||
|
||||
for name, cfg := range tt {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
err := cfg.Validate()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppConfigFromSecret_invalid(t *testing.T) {
|
||||
tt := map[string]map[string]string{
|
||||
"empty": {},
|
||||
"token and app provided": {
|
||||
"github_token": "token",
|
||||
"github_app_id": "2",
|
||||
"githu_app_installation_id": "3",
|
||||
"github_app_private_key": "private key",
|
||||
},
|
||||
"invalid app id": {
|
||||
"github_app_id": "abc",
|
||||
"githu_app_installation_id": "3",
|
||||
"github_app_private_key": "private key",
|
||||
},
|
||||
"invalid app installation_id": {
|
||||
"github_app_id": "1",
|
||||
"githu_app_installation_id": "abc",
|
||||
"github_app_private_key": "private key",
|
||||
},
|
||||
"empty private key": {
|
||||
"github_app_id": "1",
|
||||
"githu_app_installation_id": "2",
|
||||
"github_app_private_key": "",
|
||||
},
|
||||
}
|
||||
|
||||
for name, data := range tt {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
secret := &corev1.Secret{
|
||||
StringData: data,
|
||||
}
|
||||
|
||||
appConfig, err := FromSecret(secret)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, appConfig)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppConfigFromSecret_valid(t *testing.T) {
|
||||
tt := map[string]map[string]string{
|
||||
"with token": {
|
||||
"github_token": "token",
|
||||
},
|
||||
"app config": {
|
||||
"github_app_id": "2",
|
||||
"githu_app_installation_id": "3",
|
||||
"github_app_private_key": "private key",
|
||||
},
|
||||
}
|
||||
|
||||
for name, data := range tt {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
secret := &corev1.Secret{
|
||||
StringData: data,
|
||||
}
|
||||
|
||||
appConfig, err := FromSecret(secret)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, appConfig)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppConfigFromString_valid(t *testing.T) {
|
||||
tt := map[string]*AppConfig{
|
||||
"token": {
|
||||
Token: "token",
|
||||
},
|
||||
"app ID": {
|
||||
AppID: "1",
|
||||
AppInstallationID: 2,
|
||||
AppPrivateKey: "private key",
|
||||
},
|
||||
}
|
||||
|
||||
for name, cfg := range tt {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
bytes, err := json.Marshal(cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := FromJSONString(string(bytes))
|
||||
require.NoError(t, err)
|
||||
|
||||
want := cfg.tidy()
|
||||
assert.Equal(t, want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -59,10 +59,7 @@ type AutoscalingListenerSpec struct {
|
||||
Proxy *ProxyConfig `json:"proxy,omitempty"`
|
||||
|
||||
// +optional
|
||||
GitHubServerTLS *TLSConfig `json:"githubServerTLS,omitempty"`
|
||||
|
||||
// +optional
|
||||
VaultConfig *VaultConfig `json:"vaultConfig,omitempty"`
|
||||
GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"`
|
||||
|
||||
// +optional
|
||||
Metrics *MetricsConfig `json:"metrics,omitempty"`
|
||||
@@ -90,6 +87,7 @@ type AutoscalingListener struct {
|
||||
}
|
||||
|
||||
// +kubebuilder:object:root=true
|
||||
|
||||
// AutoscalingListenerList contains a list of AutoscalingListener
|
||||
type AutoscalingListenerList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
@@ -24,7 +24,6 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/actions/actions-runner-controller/hash"
|
||||
"github.com/actions/actions-runner-controller/vault"
|
||||
"golang.org/x/net/http/httpproxy"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -70,10 +69,7 @@ type AutoscalingRunnerSetSpec struct {
|
||||
Proxy *ProxyConfig `json:"proxy,omitempty"`
|
||||
|
||||
// +optional
|
||||
GitHubServerTLS *TLSConfig `json:"githubServerTLS,omitempty"`
|
||||
|
||||
// +optional
|
||||
VaultConfig *VaultConfig `json:"vaultConfig,omitempty"`
|
||||
GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"`
|
||||
|
||||
// Required
|
||||
Template corev1.PodTemplateSpec `json:"template,omitempty"`
|
||||
@@ -93,12 +89,12 @@ type AutoscalingRunnerSetSpec struct {
|
||||
MinRunners *int `json:"minRunners,omitempty"`
|
||||
}
|
||||
|
||||
type TLSConfig struct {
|
||||
type GitHubServerTLSConfig struct {
|
||||
// Required
|
||||
CertificateFrom *TLSCertificateSource `json:"certificateFrom,omitempty"`
|
||||
}
|
||||
|
||||
func (c *TLSConfig) ToCertPool(keyFetcher func(name, key string) ([]byte, error)) (*x509.CertPool, error) {
|
||||
func (c *GitHubServerTLSConfig) ToCertPool(keyFetcher func(name, key string) ([]byte, error)) (*x509.CertPool, error) {
|
||||
if c.CertificateFrom == nil {
|
||||
return nil, fmt.Errorf("certificateFrom not specified")
|
||||
}
|
||||
@@ -146,7 +142,7 @@ type ProxyConfig struct {
|
||||
NoProxy []string `json:"noProxy,omitempty"`
|
||||
}
|
||||
|
||||
func (c *ProxyConfig) ToHTTPProxyConfig(secretFetcher func(string) (*corev1.Secret, error)) (*httpproxy.Config, error) {
|
||||
func (c *ProxyConfig) toHTTPProxyConfig(secretFetcher func(string) (*corev1.Secret, error)) (*httpproxy.Config, error) {
|
||||
config := &httpproxy.Config{
|
||||
NoProxy: strings.Join(c.NoProxy, ","),
|
||||
}
|
||||
@@ -205,7 +201,7 @@ func (c *ProxyConfig) ToHTTPProxyConfig(secretFetcher func(string) (*corev1.Secr
|
||||
}
|
||||
|
||||
func (c *ProxyConfig) ToSecretData(secretFetcher func(string) (*corev1.Secret, error)) (map[string][]byte, error) {
|
||||
config, err := c.ToHTTPProxyConfig(secretFetcher)
|
||||
config, err := c.toHTTPProxyConfig(secretFetcher)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -219,7 +215,7 @@ func (c *ProxyConfig) ToSecretData(secretFetcher func(string) (*corev1.Secret, e
|
||||
}
|
||||
|
||||
func (c *ProxyConfig) ProxyFunc(secretFetcher func(string) (*corev1.Secret, error)) (func(*http.Request) (*url.URL, error), error) {
|
||||
config, err := c.ToHTTPProxyConfig(secretFetcher)
|
||||
config, err := c.toHTTPProxyConfig(secretFetcher)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -239,26 +235,6 @@ type ProxyServerConfig struct {
|
||||
CredentialSecretRef string `json:"credentialSecretRef,omitempty"`
|
||||
}
|
||||
|
||||
type VaultConfig struct {
|
||||
// +optional
|
||||
Type vault.VaultType `json:"type,omitempty"`
|
||||
// +optional
|
||||
AzureKeyVault *AzureKeyVaultConfig `json:"azureKeyVault,omitempty"`
|
||||
// +optional
|
||||
Proxy *ProxyConfig `json:"proxy,omitempty"`
|
||||
}
|
||||
|
||||
type AzureKeyVaultConfig struct {
|
||||
// +required
|
||||
URL string `json:"url,omitempty"`
|
||||
// +required
|
||||
TenantID string `json:"tenantId,omitempty"`
|
||||
// +required
|
||||
ClientID string `json:"clientId,omitempty"`
|
||||
// +required
|
||||
CertificatePath string `json:"certificatePath,omitempty"`
|
||||
}
|
||||
|
||||
// MetricsConfig holds configuration parameters for each metric type
|
||||
type MetricsConfig struct {
|
||||
// +optional
|
||||
@@ -309,33 +285,6 @@ func (ars *AutoscalingRunnerSet) ListenerSpecHash() string {
|
||||
return hash.ComputeTemplateHash(&spec)
|
||||
}
|
||||
|
||||
func (ars *AutoscalingRunnerSet) GitHubConfigSecret() string {
|
||||
return ars.Spec.GitHubConfigSecret
|
||||
}
|
||||
|
||||
func (ars *AutoscalingRunnerSet) GitHubConfigUrl() string {
|
||||
return ars.Spec.GitHubConfigUrl
|
||||
}
|
||||
|
||||
func (ars *AutoscalingRunnerSet) GitHubProxy() *ProxyConfig {
|
||||
return ars.Spec.Proxy
|
||||
}
|
||||
|
||||
func (ars *AutoscalingRunnerSet) GitHubServerTLS() *TLSConfig {
|
||||
return ars.Spec.GitHubServerTLS
|
||||
}
|
||||
|
||||
func (ars *AutoscalingRunnerSet) VaultConfig() *VaultConfig {
|
||||
return ars.Spec.VaultConfig
|
||||
}
|
||||
|
||||
func (ars *AutoscalingRunnerSet) VaultProxy() *ProxyConfig {
|
||||
if ars.Spec.VaultConfig != nil {
|
||||
return ars.Spec.VaultConfig.Proxy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ars *AutoscalingRunnerSet) RunnerSetSpecHash() string {
|
||||
type runnerSetSpec struct {
|
||||
GitHubConfigUrl string
|
||||
@@ -343,7 +292,7 @@ func (ars *AutoscalingRunnerSet) RunnerSetSpecHash() string {
|
||||
RunnerGroup string
|
||||
RunnerScaleSetName string
|
||||
Proxy *ProxyConfig
|
||||
GitHubServerTLS *TLSConfig
|
||||
GitHubServerTLS *GitHubServerTLSConfig
|
||||
Template corev1.PodTemplateSpec
|
||||
}
|
||||
spec := &runnerSetSpec{
|
||||
|
||||
@@ -34,7 +34,6 @@ const EphemeralRunnerContainerName = "runner"
|
||||
// +kubebuilder:printcolumn:JSONPath=".status.jobWorkflowRef",name=JobWorkflowRef,type=string
|
||||
// +kubebuilder:printcolumn:JSONPath=".status.workflowRunId",name=WorkflowRunId,type=number
|
||||
// +kubebuilder:printcolumn:JSONPath=".status.jobDisplayName",name=JobDisplayName,type=string
|
||||
// +kubebuilder:printcolumn:JSONPath=".status.jobId",name=JobId,type=string
|
||||
// +kubebuilder:printcolumn:JSONPath=".status.message",name=Message,type=string
|
||||
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
|
||||
|
||||
@@ -51,10 +50,6 @@ func (er *EphemeralRunner) IsDone() bool {
|
||||
return er.Status.Phase == corev1.PodSucceeded || er.Status.Phase == corev1.PodFailed
|
||||
}
|
||||
|
||||
func (er *EphemeralRunner) HasJob() bool {
|
||||
return len(er.Status.JobID) > 0
|
||||
}
|
||||
|
||||
func (er *EphemeralRunner) HasContainerHookConfigured() bool {
|
||||
for i := range er.Spec.Spec.Containers {
|
||||
if er.Spec.Spec.Containers[i].Name != EphemeralRunnerContainerName {
|
||||
@@ -72,33 +67,6 @@ func (er *EphemeralRunner) HasContainerHookConfigured() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (er *EphemeralRunner) GitHubConfigSecret() string {
|
||||
return er.Spec.GitHubConfigSecret
|
||||
}
|
||||
|
||||
func (er *EphemeralRunner) GitHubConfigUrl() string {
|
||||
return er.Spec.GitHubConfigUrl
|
||||
}
|
||||
|
||||
func (er *EphemeralRunner) GitHubProxy() *ProxyConfig {
|
||||
return er.Spec.Proxy
|
||||
}
|
||||
|
||||
func (er *EphemeralRunner) GitHubServerTLS() *TLSConfig {
|
||||
return er.Spec.GitHubServerTLS
|
||||
}
|
||||
|
||||
func (er *EphemeralRunner) VaultConfig() *VaultConfig {
|
||||
return er.Spec.VaultConfig
|
||||
}
|
||||
|
||||
func (er *EphemeralRunner) VaultProxy() *ProxyConfig {
|
||||
if er.Spec.VaultConfig != nil {
|
||||
return er.Spec.VaultConfig.Proxy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EphemeralRunnerSpec defines the desired state of EphemeralRunner
|
||||
type EphemeralRunnerSpec struct {
|
||||
// +required
|
||||
@@ -107,9 +75,6 @@ type EphemeralRunnerSpec struct {
|
||||
// +required
|
||||
GitHubConfigSecret string `json:"githubConfigSecret,omitempty"`
|
||||
|
||||
// +optional
|
||||
GitHubServerTLS *TLSConfig `json:"githubServerTLS,omitempty"`
|
||||
|
||||
// +required
|
||||
RunnerScaleSetId int `json:"runnerScaleSetId,omitempty"`
|
||||
|
||||
@@ -120,7 +85,7 @@ type EphemeralRunnerSpec struct {
|
||||
ProxySecretRef string `json:"proxySecretRef,omitempty"`
|
||||
|
||||
// +optional
|
||||
VaultConfig *VaultConfig `json:"vaultConfig,omitempty"`
|
||||
GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"`
|
||||
|
||||
corev1.PodTemplateSpec `json:",inline"`
|
||||
}
|
||||
@@ -150,16 +115,15 @@ type EphemeralRunnerStatus struct {
|
||||
RunnerId int `json:"runnerId,omitempty"`
|
||||
// +optional
|
||||
RunnerName string `json:"runnerName,omitempty"`
|
||||
// +optional
|
||||
RunnerJITConfig string `json:"runnerJITConfig,omitempty"`
|
||||
|
||||
// +optional
|
||||
Failures map[string]metav1.Time `json:"failures,omitempty"`
|
||||
Failures map[string]bool `json:"failures,omitempty"`
|
||||
|
||||
// +optional
|
||||
JobRequestId int64 `json:"jobRequestId,omitempty"`
|
||||
|
||||
// +optional
|
||||
JobID string `json:"jobId,omitempty"`
|
||||
|
||||
// +optional
|
||||
JobRepositoryName string `json:"jobRepositoryName,omitempty"`
|
||||
|
||||
@@ -173,20 +137,6 @@ type EphemeralRunnerStatus struct {
|
||||
JobDisplayName string `json:"jobDisplayName,omitempty"`
|
||||
}
|
||||
|
||||
func (s *EphemeralRunnerStatus) LastFailure() metav1.Time {
|
||||
var maxTime metav1.Time
|
||||
if len(s.Failures) == 0 {
|
||||
return maxTime
|
||||
}
|
||||
|
||||
for _, ts := range s.Failures {
|
||||
if ts.After(maxTime.Time) {
|
||||
maxTime = ts
|
||||
}
|
||||
}
|
||||
return maxTime
|
||||
}
|
||||
|
||||
// +kubebuilder:object:root=true
|
||||
|
||||
// EphemeralRunnerList contains a list of EphemeralRunner
|
||||
|
||||
@@ -60,35 +60,9 @@ type EphemeralRunnerSet struct {
|
||||
Status EphemeralRunnerSetStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
func (ers *EphemeralRunnerSet) GitHubConfigSecret() string {
|
||||
return ers.Spec.EphemeralRunnerSpec.GitHubConfigSecret
|
||||
}
|
||||
|
||||
func (ers *EphemeralRunnerSet) GitHubConfigUrl() string {
|
||||
return ers.Spec.EphemeralRunnerSpec.GitHubConfigUrl
|
||||
}
|
||||
|
||||
func (ers *EphemeralRunnerSet) GitHubProxy() *ProxyConfig {
|
||||
return ers.Spec.EphemeralRunnerSpec.Proxy
|
||||
}
|
||||
|
||||
func (ers *EphemeralRunnerSet) GitHubServerTLS() *TLSConfig {
|
||||
return ers.Spec.EphemeralRunnerSpec.GitHubServerTLS
|
||||
}
|
||||
|
||||
func (ers *EphemeralRunnerSet) VaultConfig() *VaultConfig {
|
||||
return ers.Spec.EphemeralRunnerSpec.VaultConfig
|
||||
}
|
||||
|
||||
func (ers *EphemeralRunnerSet) VaultProxy() *ProxyConfig {
|
||||
if ers.Spec.EphemeralRunnerSpec.VaultConfig != nil {
|
||||
return ers.Spec.EphemeralRunnerSpec.VaultConfig.Proxy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// +kubebuilder:object:root=true
|
||||
|
||||
// EphemeralRunnerSetList contains a list of EphemeralRunnerSet
|
||||
// +kubebuilder:object:root=true
|
||||
type EphemeralRunnerSetList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
|
||||
func TestGitHubServerTLSConfig_ToCertPool(t *testing.T) {
|
||||
t.Run("returns an error if CertificateFrom not specified", func(t *testing.T) {
|
||||
c := &v1alpha1.TLSConfig{
|
||||
c := &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: nil,
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@ func TestGitHubServerTLSConfig_ToCertPool(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("returns an error if CertificateFrom.ConfigMapKeyRef not specified", func(t *testing.T) {
|
||||
c := &v1alpha1.TLSConfig{
|
||||
c := &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{},
|
||||
}
|
||||
|
||||
@@ -41,7 +41,7 @@ func TestGitHubServerTLSConfig_ToCertPool(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("returns a valid cert pool with correct configuration", func(t *testing.T) {
|
||||
c := &v1alpha1.TLSConfig{
|
||||
c := &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
|
||||
@@ -1,72 +0,0 @@
|
||||
package v1alpha1
|
||||
|
||||
import "strings"
|
||||
|
||||
func IsVersionAllowed(resourceVersion, buildVersion string) bool {
|
||||
if buildVersion == "dev" || resourceVersion == buildVersion || strings.HasPrefix(buildVersion, "canary-") {
|
||||
return true
|
||||
}
|
||||
|
||||
rv, ok := parseSemver(resourceVersion)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
bv, ok := parseSemver(buildVersion)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return rv.major == bv.major && rv.minor == bv.minor
|
||||
}
|
||||
|
||||
type semver struct {
|
||||
major string
|
||||
minor string
|
||||
}
|
||||
|
||||
func parseSemver(v string) (p semver, ok bool) {
|
||||
if v == "" {
|
||||
return
|
||||
}
|
||||
p.major, v, ok = parseInt(v)
|
||||
if !ok {
|
||||
return p, false
|
||||
}
|
||||
if v == "" {
|
||||
p.minor = "0"
|
||||
return p, true
|
||||
}
|
||||
if v[0] != '.' {
|
||||
return p, false
|
||||
}
|
||||
p.minor, v, ok = parseInt(v[1:])
|
||||
if !ok {
|
||||
return p, false
|
||||
}
|
||||
if v == "" {
|
||||
return p, true
|
||||
}
|
||||
if v[0] != '.' {
|
||||
return p, false
|
||||
}
|
||||
if _, _, ok = parseInt(v[1:]); !ok {
|
||||
return p, false
|
||||
}
|
||||
return p, true
|
||||
}
|
||||
|
||||
func parseInt(v string) (t, rest string, ok bool) {
|
||||
if v == "" {
|
||||
return
|
||||
}
|
||||
if v[0] < '0' || '9' < v[0] {
|
||||
return
|
||||
}
|
||||
i := 1
|
||||
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
|
||||
i++
|
||||
}
|
||||
if v[0] == '0' && i != 1 {
|
||||
return
|
||||
}
|
||||
return v[:i], v[i:], true
|
||||
}
|
||||
@@ -1,60 +0,0 @@
|
||||
package v1alpha1_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestIsVersionAllowed(t *testing.T) {
|
||||
t.Parallel()
|
||||
tt := map[string]struct {
|
||||
resourceVersion string
|
||||
buildVersion string
|
||||
want bool
|
||||
}{
|
||||
"dev should always be allowed": {
|
||||
resourceVersion: "0.11.0",
|
||||
buildVersion: "dev",
|
||||
want: true,
|
||||
},
|
||||
"resourceVersion is not semver": {
|
||||
resourceVersion: "dev",
|
||||
buildVersion: "0.11.0",
|
||||
want: false,
|
||||
},
|
||||
"buildVersion is not semver": {
|
||||
resourceVersion: "0.11.0",
|
||||
buildVersion: "NA",
|
||||
want: false,
|
||||
},
|
||||
"major version mismatch": {
|
||||
resourceVersion: "0.11.0",
|
||||
buildVersion: "1.11.0",
|
||||
want: false,
|
||||
},
|
||||
"minor version mismatch": {
|
||||
resourceVersion: "0.11.0",
|
||||
buildVersion: "0.10.0",
|
||||
want: false,
|
||||
},
|
||||
"patch version mismatch": {
|
||||
resourceVersion: "0.11.1",
|
||||
buildVersion: "0.11.0",
|
||||
want: true,
|
||||
},
|
||||
"arbitrary version match": {
|
||||
resourceVersion: "abc",
|
||||
buildVersion: "abc",
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range tt {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
got := v1alpha1.IsVersionAllowed(tc.resourceVersion, tc.buildVersion)
|
||||
assert.Equal(t, tc.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -22,7 +22,6 @@ package v1alpha1

import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)

@@ -100,12 +99,7 @@ func (in *AutoscalingListenerSpec) DeepCopyInto(out *AutoscalingListenerSpec) {
}
if in.GitHubServerTLS != nil {
in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
*out = new(TLSConfig)
(*in).DeepCopyInto(*out)
}
if in.VaultConfig != nil {
in, out := &in.VaultConfig, &out.VaultConfig
*out = new(VaultConfig)
*out = new(GitHubServerTLSConfig)
(*in).DeepCopyInto(*out)
}
if in.Metrics != nil {
@@ -214,12 +208,7 @@ func (in *AutoscalingRunnerSetSpec) DeepCopyInto(out *AutoscalingRunnerSetSpec)
}
if in.GitHubServerTLS != nil {
in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
*out = new(TLSConfig)
(*in).DeepCopyInto(*out)
}
if in.VaultConfig != nil {
in, out := &in.VaultConfig, &out.VaultConfig
*out = new(VaultConfig)
*out = new(GitHubServerTLSConfig)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
@@ -270,21 +259,6 @@ func (in *AutoscalingRunnerSetStatus) DeepCopy() *AutoscalingRunnerSetStatus {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureKeyVaultConfig) DeepCopyInto(out *AzureKeyVaultConfig) {
*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureKeyVaultConfig.
func (in *AzureKeyVaultConfig) DeepCopy() *AzureKeyVaultConfig {
if in == nil {
return nil
}
out := new(AzureKeyVaultConfig)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CounterMetric) DeepCopyInto(out *CounterMetric) {
*out = *in
@@ -457,19 +431,14 @@ func (in *EphemeralRunnerSetStatus) DeepCopy() *EphemeralRunnerSetStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralRunnerSpec) DeepCopyInto(out *EphemeralRunnerSpec) {
*out = *in
if in.GitHubServerTLS != nil {
in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
*out = new(TLSConfig)
(*in).DeepCopyInto(*out)
}
if in.Proxy != nil {
in, out := &in.Proxy, &out.Proxy
*out = new(ProxyConfig)
(*in).DeepCopyInto(*out)
}
if in.VaultConfig != nil {
in, out := &in.VaultConfig, &out.VaultConfig
*out = new(VaultConfig)
if in.GitHubServerTLS != nil {
in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
*out = new(GitHubServerTLSConfig)
(*in).DeepCopyInto(*out)
}
in.PodTemplateSpec.DeepCopyInto(&out.PodTemplateSpec)
@@ -490,9 +459,9 @@ func (in *EphemeralRunnerStatus) DeepCopyInto(out *EphemeralRunnerStatus) {
*out = *in
if in.Failures != nil {
in, out := &in.Failures, &out.Failures
*out = make(map[string]metav1.Time, len(*in))
*out = make(map[string]bool, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
(*out)[key] = val
}
}
}
@@ -527,6 +496,26 @@ func (in *GaugeMetric) DeepCopy() *GaugeMetric {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitHubServerTLSConfig) DeepCopyInto(out *GitHubServerTLSConfig) {
*out = *in
if in.CertificateFrom != nil {
in, out := &in.CertificateFrom, &out.CertificateFrom
*out = new(TLSCertificateSource)
(*in).DeepCopyInto(*out)
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubServerTLSConfig.
func (in *GitHubServerTLSConfig) DeepCopy() *GitHubServerTLSConfig {
if in == nil {
return nil
}
out := new(GitHubServerTLSConfig)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HistogramMetric) DeepCopyInto(out *HistogramMetric) {
*out = *in
@@ -679,48 +668,3 @@ func (in *TLSCertificateSource) DeepCopy() *TLSCertificateSource {
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
*out = *in
if in.CertificateFrom != nil {
in, out := &in.CertificateFrom, &out.CertificateFrom
*out = new(TLSCertificateSource)
(*in).DeepCopyInto(*out)
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
func (in *TLSConfig) DeepCopy() *TLSConfig {
if in == nil {
return nil
}
out := new(TLSConfig)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VaultConfig) DeepCopyInto(out *VaultConfig) {
*out = *in
if in.AzureKeyVault != nil {
in, out := &in.AzureKeyVault, &out.AzureKeyVault
*out = new(AzureKeyVaultConfig)
**out = **in
}
if in.Proxy != nil {
in, out := &in.Proxy, &out.Proxy
*out = new(ProxyConfig)
(*in).DeepCopyInto(*out)
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultConfig.
func (in *VaultConfig) DeepCopy() *VaultConfig {
if in == nil {
return nil
}
out := new(VaultConfig)
in.DeepCopyInto(out)
return out
}

@@ -44,7 +44,7 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
| `metrics.serviceMonitor.enable` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false |
| `metrics.serviceMonitor.interval` | Configure the interval that Prometheus should scrap the controller's metrics | 1m |
| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `Release.Namespace` (the default namespace of the helm chart). |
| `metrics.serviceMonitor.namespace | Namespace which Prometheus is running in | `Release.Namespace` (the default namespace of the helm chart). |
| `metrics.serviceMonitor.timeout` | Configure the timeout the timeout of Prometheus scrapping. | 30s |
| `metrics.serviceAnnotations` | Set annotations for the provisioned metrics service resource | |
| `metrics.port` | Set port of metrics service | 8443 |

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.19.0
controller-gen.kubebuilder.io/version: v0.17.2
name: horizontalrunnerautoscalers.actions.summerwind.dev
spec:
group: actions.summerwind.dev
@@ -32,8 +32,7 @@ spec:
name: v1alpha1
schema:
openAPIV3Schema:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
API
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
properties:
apiVersion:
description: |-
@@ -53,8 +52,7 @@ spec:
metadata:
type: object
spec:
description: HorizontalRunnerAutoscalerSpec defines the desired state
of HorizontalRunnerAutoscaler
description: HorizontalRunnerAutoscalerSpec defines the desired state of HorizontalRunnerAutoscaler
properties:
capacityReservations:
items:
@@ -85,12 +83,10 @@ spec:
type: object
type: object
maxReplicas:
description: MaxReplicas is the maximum number of replicas the deployment
is allowed to scale
description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
type: integer
metrics:
description: Metrics is the collection of various metric targets to
calculate desired number of runners
description: Metrics is the collection of various metric targets to calculate desired number of runners
items:
properties:
repositoryNames:
@@ -138,8 +134,7 @@ spec:
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment
is allowed to scale
description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: |-
@@ -147,8 +142,7 @@ spec:
Used to prevent flapping (down->up->down->... loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to scaled resource like
RunnerDeployment
description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
properties:
kind:
description: Kind is the type of resource being referenced
@@ -240,8 +234,7 @@ spec:
A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
properties:
endTime:
description: EndTime is the time at which the first override
ends.
description: EndTime is the time at which the first override ends.
format: date-time
type: string
minReplicas:
@@ -272,8 +265,7 @@ spec:
type: string
type: object
startTime:
description: StartTime is the time at which the first override
starts.
description: StartTime is the time at which the first override starts.
format: date-time
type: string
required:
@@ -322,3 +314,4 @@ spec:
storage: true
subresources:
status: {}
preserveUnknownFields: false

File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.19.0
|
||||
controller-gen.kubebuilder.io/version: v0.17.2
|
||||
name: runnersets.actions.summerwind.dev
|
||||
spec:
|
||||
group: actions.summerwind.dev
|
||||
@@ -554,6 +554,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -568,6 +569,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -728,6 +730,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -742,6 +745,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -830,8 +834,8 @@ spec:
|
||||
most preferred is the one with the greatest sum of weights, i.e.
|
||||
for each node that meets all of the scheduling requirements (resource
|
||||
request, requiredDuringScheduling anti-affinity expressions, etc.),
|
||||
compute a sum by iterating through the elements of this field and subtracting
|
||||
"weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
compute a sum by iterating through the elements of this field and adding
|
||||
"weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
node(s) with the highest sum are the most preferred.
|
||||
items:
|
||||
description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
|
||||
@@ -895,6 +899,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -909,6 +914,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -1069,6 +1075,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -1083,6 +1090,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -1209,9 +1217,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -1265,42 +1271,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -1356,13 +1326,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -1382,9 +1352,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -1633,12 +1601,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: |-
|
||||
@@ -2029,7 +1991,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -2080,10 +2042,10 @@ spec:
|
||||
restartPolicy:
|
||||
description: |-
|
||||
RestartPolicy defines the restart behavior of individual containers in a pod.
|
||||
This overrides the pod-level restart policy. When this field is not specified,
|
||||
This field may only be set for init containers, and the only allowed value is "Always".
|
||||
For non-init containers or when this field is not specified,
|
||||
the restart behavior is defined by the Pod's restart policy and the container type.
|
||||
Additionally, setting the RestartPolicy as "Always" for the init container will
|
||||
have the following effect:
|
||||
Setting the RestartPolicy as "Always" for the init container will have the following effect:
|
||||
this init container will be continually restarted on
|
||||
exit until all regular containers have terminated. Once all regular
|
||||
containers have completed, all init containers with restartPolicy "Always"
|
||||
@@ -2095,57 +2057,6 @@ spec:
|
||||
init container is started, or after any startupProbe has successfully
|
||||
completed.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. The rules are evaluated in
|
||||
order. Once a rule matches a container exit condition, the remaining
|
||||
rules are ignored. If no rule matches the container exit condition,
|
||||
the Container-level restart policy determines the whether the container
|
||||
is restarted or not. Constraints on the rules:
|
||||
- At most 20 rules are allowed.
|
||||
- Rules can have the same action.
|
||||
- Identical rules are not forbidden in validations.
|
||||
When rules are specified, container MUST set RestartPolicy explicitly
|
||||
even it if matches the Pod's RestartPolicy.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
SecurityContext defines the security options the container should be run with.
|
||||
@@ -2743,9 +2654,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -2799,42 +2708,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -2890,13 +2763,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -2916,9 +2789,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -3163,12 +3034,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: Probes are not allowed for ephemeral containers.
|
||||
@@ -3542,7 +3407,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -3594,51 +3459,9 @@ spec:
|
||||
description: |-
|
||||
Restart policy for the container to manage the restart behavior of each
|
||||
container within a pod.
|
||||
You cannot set this field on ephemeral containers.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. You cannot set this field on
|
||||
This may only be set for init containers. You cannot set this field on
|
||||
ephemeral containers.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
Optional: SecurityContext defines the security options the ephemeral container should be run with.
|
||||
@@ -4157,9 +3980,7 @@ spec:
|
||||
hostNetwork:
|
||||
description: |-
|
||||
Host networking requested for this pod. Use the host's network namespace.
|
||||
When using HostNetwork you should specify ports so the scheduler is aware.
|
||||
When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
|
||||
and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
|
||||
If this option is set, the ports that will be used must be specified.
|
||||
Default to false.
|
||||
type: boolean
|
||||
hostPID:
|
||||
@@ -4184,19 +4005,6 @@ spec:
|
||||
Specifies the hostname of the Pod
|
||||
If not specified, the pod's hostname will be set to a system-defined value.
|
||||
type: string
|
||||
hostnameOverride:
|
||||
description: |-
|
||||
HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
|
||||
This field only specifies the pod's hostname and does not affect its DNS records.
|
||||
When this field is set to a non-empty string:
|
||||
- It takes precedence over the values set in `hostname` and `subdomain`.
|
||||
- The Pod's hostname will be set to this value.
|
||||
- `setHostnameAsFQDN` must be nil or set to false.
|
||||
- `hostNetwork` must be set to false.
|
||||
|
||||
This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
|
||||
Requires the HostnameOverride feature gate to be enabled.
|
||||
type: string
|
||||
imagePullSecrets:
|
||||
description: |-
|
||||
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
|
||||
@@ -4232,7 +4040,7 @@ spec:
|
||||
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
|
||||
The resourceRequirements of an init container are taken into account during scheduling
|
||||
by finding the highest request/limit for each resource type, and then using the max of
|
||||
that value or the sum of the normal containers. Limits are applied to init containers
|
||||
of that value or the sum of the normal containers. Limits are applied to init containers
|
||||
in a similar fashion.
|
||||
Init containers cannot currently be added or removed.
|
||||
Cannot be updated.
|
||||
@@ -4276,9 +4084,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -4332,42 +4138,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -4423,13 +4193,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -4449,9 +4219,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -4700,12 +4468,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: |-
|
||||
@@ -5096,7 +4858,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -5147,10 +4909,10 @@ spec:
|
||||
restartPolicy:
|
||||
description: |-
|
||||
RestartPolicy defines the restart behavior of individual containers in a pod.
|
||||
This overrides the pod-level restart policy. When this field is not specified,
|
||||
This field may only be set for init containers, and the only allowed value is "Always".
|
||||
For non-init containers or when this field is not specified,
|
||||
the restart behavior is defined by the Pod's restart policy and the container type.
|
||||
Additionally, setting the RestartPolicy as "Always" for the init container will
|
||||
have the following effect:
|
||||
Setting the RestartPolicy as "Always" for the init container will have the following effect:
|
||||
this init container will be continually restarted on
|
||||
exit until all regular containers have terminated. Once all regular
|
||||
containers have completed, all init containers with restartPolicy "Always"
|
||||
@@ -5162,57 +4924,6 @@ spec:
|
||||
init container is started, or after any startupProbe has successfully
|
||||
completed.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. The rules are evaluated in
|
||||
order. Once a rule matches a container exit condition, the remaining
|
||||
rules are ignored. If no rule matches the container exit condition,
|
||||
the Container-level restart policy determines the whether the container
|
||||
is restarted or not. Constraints on the rules:
|
||||
- At most 20 rules are allowed.
|
||||
- Rules can have the same action.
|
||||
- Identical rules are not forbidden in validations.
|
||||
When rules are specified, container MUST set RestartPolicy explicitly
|
||||
even it if matches the Pod's RestartPolicy.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
SecurityContext defines the security options the container should be run with.
|
||||
@@ -5726,7 +5437,6 @@ spec:
|
||||
- spec.hostPID
|
||||
- spec.hostIPC
|
||||
- spec.hostUsers
|
||||
- spec.resources
|
||||
- spec.securityContext.appArmorProfile
|
||||
- spec.securityContext.seLinuxOptions
|
||||
- spec.securityContext.seccompProfile
|
||||
@@ -5878,7 +5588,7 @@ spec:
|
||||
description: |-
|
||||
Resources is the total amount of CPU and Memory resources required by all
|
||||
containers in the pod. It supports specifying Requests and Limits for
|
||||
"cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
|
||||
"cpu" and "memory" resource names only. ResourceClaims are not supported.
|
||||
|
||||
This field enables fine-grained control over resource allocation for the
|
||||
entire pod, allowing resource sharing among containers in a pod.
|
||||
@@ -5891,7 +5601,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -6416,6 +6126,7 @@ spec:
|
||||
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
|
||||
|
||||
If this value is nil, the behavior is equivalent to the Honor policy.
|
||||
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
|
||||
type: string
|
||||
nodeTaintsPolicy:
|
||||
description: |-
|
||||
@@ -6426,6 +6137,7 @@ spec:
|
||||
- Ignore: node taints are ignored. All nodes are included.
|
||||
|
||||
If this value is nil, the behavior is equivalent to the Ignore policy.
|
||||
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
|
||||
type: string
|
||||
topologyKey:
|
||||
description: |-
|
||||
@@ -7131,13 +6843,15 @@ spec:
|
||||
volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
|
||||
If specified, the CSI driver will create or update the volume with the attributes defined
|
||||
in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
|
||||
it can be changed after the claim is created. An empty string or nil value indicates that no
|
||||
VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
|
||||
this field can be reset to its previous value (including nil) to cancel the modification.
|
||||
it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
|
||||
will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
|
||||
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
|
||||
will be set by the persistentvolume controller if it exists.
|
||||
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
|
||||
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
|
||||
exists.
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
|
||||
(Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
|
||||
type: string
|
||||
volumeMode:
|
||||
description: |-
|
||||
@@ -7311,9 +7025,12 @@ spec:
|
||||
description: |-
|
||||
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
|
||||
Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
|
||||
More info: https://examples.k8s.io/volumes/glusterfs/README.md
|
||||
properties:
|
||||
endpoints:
|
||||
description: endpoints is the endpoint name that details Glusterfs topology.
|
||||
description: |-
|
||||
endpoints is the endpoint name that details Glusterfs topology.
|
||||
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
|
||||
type: string
|
||||
path:
|
||||
description: |-
|
||||
@@ -7367,7 +7084,7 @@ spec:
|
||||
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
|
||||
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
|
||||
The volume will be mounted read-only (ro) and non-executable files (noexec).
|
||||
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
|
||||
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
|
||||
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
|
||||
properties:
|
||||
pullPolicy:
|
||||
@@ -7392,7 +7109,7 @@ spec:
|
||||
description: |-
|
||||
iscsi represents an ISCSI Disk resource that is attached to a
|
||||
kubelet's host machine and then exposed to the pod.
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
|
||||
More info: https://examples.k8s.io/volumes/iscsi/README.md
|
||||
properties:
|
||||
chapAuthDiscovery:
|
||||
description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
|
||||
@@ -7782,110 +7499,6 @@ spec:
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
podCertificate:
|
||||
description: |-
|
||||
Projects an auto-rotating credential bundle (private key and certificate
|
||||
chain) that the pod can use either as a TLS client or server.
|
||||
|
||||
Kubelet generates a private key and uses it to send a
|
||||
PodCertificateRequest to the named signer. Once the signer approves the
|
||||
request and issues a certificate chain, Kubelet writes the key and
|
||||
certificate chain to the pod filesystem. The pod does not start until
|
||||
certificates have been issued for each podCertificate projected volume
|
||||
source in its spec.
|
||||
|
||||
Kubelet will begin trying to rotate the certificate at the time indicated
|
||||
by the signer using the PodCertificateRequest.Status.BeginRefreshAt
|
||||
timestamp.
|
||||
|
||||
Kubelet can write a single file, indicated by the credentialBundlePath
|
||||
field, or separate files, indicated by the keyPath and
|
||||
certificateChainPath fields.
|
||||
|
||||
The credential bundle is a single file in PEM format. The first PEM
|
||||
entry is the private key (in PKCS#8 format), and the remaining PEM
|
||||
entries are the certificate chain issued by the signer (typically,
|
||||
signers will return their certificate chain in leaf-to-root order).
|
||||
|
||||
Prefer using the credential bundle format, since your application code
|
||||
can read it atomically. If you use keyPath and certificateChainPath,
|
||||
your application must make two separate file reads. If these coincide
|
||||
with a certificate rotation, it is possible that the private key and leaf
|
||||
certificate you read may not correspond to each other. Your application
|
||||
will need to check for this condition, and re-read until they are
|
||||
consistent.
|
||||
|
||||
The named signer controls chooses the format of the certificate it
|
||||
issues; consult the signer implementation's documentation to learn how to
|
||||
use the certificates it issues.
|
||||
properties:
|
||||
certificateChainPath:
|
||||
description: |-
|
||||
Write the certificate chain at this path in the projected volume.
|
||||
|
||||
Most applications should use credentialBundlePath. When using keyPath
|
||||
and certificateChainPath, your application needs to check that the key
|
||||
and leaf certificate are consistent, because it is possible to read the
|
||||
files mid-rotation.
|
||||
type: string
|
||||
credentialBundlePath:
|
||||
description: |-
|
||||
Write the credential bundle at this path in the projected volume.
|
||||
|
||||
The credential bundle is a single file that contains multiple PEM blocks.
|
||||
The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
|
||||
key.
|
||||
|
||||
The remaining blocks are CERTIFICATE blocks, containing the issued
|
||||
certificate chain from the signer (leaf and any intermediates).
|
||||
|
||||
Using credentialBundlePath lets your Pod's application code make a single
|
||||
atomic read that retrieves a consistent key and certificate chain. If you
|
||||
project them to separate files, your application code will need to
|
||||
additionally check that the leaf certificate was issued to the key.
|
||||
type: string
|
||||
keyPath:
|
||||
description: |-
|
||||
Write the key at this path in the projected volume.
|
||||
|
||||
Most applications should use credentialBundlePath. When using keyPath
|
||||
and certificateChainPath, your application needs to check that the key
|
||||
and leaf certificate are consistent, because it is possible to read the
|
||||
files mid-rotation.
|
||||
type: string
|
||||
keyType:
|
||||
description: |-
|
||||
The type of keypair Kubelet will generate for the pod.
|
||||
|
||||
Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
|
||||
"ECDSAP521", and "ED25519".
|
||||
type: string
|
||||
maxExpirationSeconds:
|
||||
description: |-
|
||||
maxExpirationSeconds is the maximum lifetime permitted for the
|
||||
certificate.
|
||||
|
||||
Kubelet copies this value verbatim into the PodCertificateRequests it
|
||||
generates for this projection.
|
||||
|
||||
If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver
|
||||
will reject values shorter than 3600 (1 hour). The maximum allowable
|
||||
value is 7862400 (91 days).
|
||||
|
||||
The signer implementation is then free to issue a certificate with any
|
||||
lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
|
||||
seconds (1 hour). This constraint is enforced by kube-apiserver.
|
||||
`kubernetes.io` signers will never issue certificates with a lifetime
|
||||
longer than 24 hours.
|
||||
format: int32
|
||||
type: integer
|
||||
signerName:
|
||||
description: Kubelet's generated CSRs will be addressed to this signer.
|
||||
type: string
|
||||
required:
|
||||
- keyType
|
||||
- signerName
|
||||
type: object
|
||||
secret:
|
||||
description: secret information about the secret data to project
|
||||
properties:
|
||||
@@ -8015,6 +7628,7 @@ spec:
|
||||
description: |-
|
||||
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
|
||||
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
|
||||
More info: https://examples.k8s.io/volumes/rbd/README.md
|
||||
properties:
|
||||
fsType:
|
||||
description: |-
|
||||
@@ -8556,13 +8170,15 @@ spec:
|
||||
volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
|
||||
If specified, the CSI driver will create or update the volume with the attributes defined
|
||||
in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
|
||||
it can be changed after the claim is created. An empty string or nil value indicates that no
|
||||
VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
|
||||
this field can be reset to its previous value (including nil) to cancel the modification.
|
||||
it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
|
||||
will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
|
||||
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
|
||||
will be set by the persistentvolume controller if it exists.
|
||||
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
|
||||
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
|
||||
exists.
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
|
||||
(Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
|
||||
type: string
|
||||
volumeMode:
|
||||
description: |-
|
||||
@@ -8662,11 +8278,13 @@ spec:
|
||||
description: |-
|
||||
currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
|
||||
When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
|
||||
This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
|
||||
type: string
|
||||
modifyVolumeStatus:
|
||||
description: |-
|
||||
ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
|
||||
When this is unset, there is no ModifyVolume operation being attempted.
|
||||
This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
|
||||
properties:
|
||||
status:
|
||||
description: "status is the status of the ControllerModifyVolume operation. It can be in any of following states:\n - Pending\n Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as\n the specified VolumeAttributesClass not existing.\n - InProgress\n InProgress indicates that the volume is being modified.\n - Infeasible\n Infeasible indicates that the request has been rejected as invalid by the CSI driver. To\n\t resolve the error, a valid VolumeAttributesClass needs to be specified.\nNote: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately."
|
||||
@@ -8737,6 +8355,7 @@ spec:
|
||||
type: object
|
||||
required:
|
||||
- selector
|
||||
- serviceName
|
||||
- template
|
||||
type: object
|
||||
status:
|
||||
@@ -8770,3 +8389,4 @@ spec:
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
preserveUnknownFields: false
|
||||
|
||||
@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.13.0
version: 0.11.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.13.0"
appVersion: "0.11.0"

home: https://github.com/actions/actions-runner-controller


File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.19.0
|
||||
controller-gen.kubebuilder.io/version: v0.17.2
|
||||
name: ephemeralrunners.actions.github.com
|
||||
spec:
|
||||
group: actions.github.com
|
||||
@@ -36,9 +36,6 @@ spec:
|
||||
- jsonPath: .status.jobDisplayName
|
||||
name: JobDisplayName
|
||||
type: string
|
||||
- jsonPath: .status.jobId
|
||||
name: JobId
|
||||
type: string
|
||||
- jsonPath: .status.message
|
||||
name: Message
|
||||
type: string
|
||||
@@ -430,6 +427,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -444,6 +442,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -604,6 +603,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -618,6 +618,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -706,8 +707,8 @@ spec:
|
||||
most preferred is the one with the greatest sum of weights, i.e.
|
||||
for each node that meets all of the scheduling requirements (resource
|
||||
request, requiredDuringScheduling anti-affinity expressions, etc.),
|
||||
compute a sum by iterating through the elements of this field and subtracting
|
||||
"weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
compute a sum by iterating through the elements of this field and adding
|
||||
"weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
node(s) with the highest sum are the most preferred.
|
||||
items:
|
||||
description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
|
||||
@@ -771,6 +772,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -785,6 +787,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -945,6 +948,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -959,6 +963,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -1085,9 +1090,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -1141,42 +1144,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -1232,13 +1199,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -1258,9 +1225,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -1509,12 +1474,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: |-
|
||||
@@ -1905,7 +1864,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -1956,10 +1915,10 @@ spec:
|
||||
restartPolicy:
|
||||
description: |-
|
||||
RestartPolicy defines the restart behavior of individual containers in a pod.
|
||||
This overrides the pod-level restart policy. When this field is not specified,
|
||||
This field may only be set for init containers, and the only allowed value is "Always".
|
||||
For non-init containers or when this field is not specified,
|
||||
the restart behavior is defined by the Pod's restart policy and the container type.
|
||||
Additionally, setting the RestartPolicy as "Always" for the init container will
|
||||
have the following effect:
|
||||
Setting the RestartPolicy as "Always" for the init container will have the following effect:
|
||||
this init container will be continually restarted on
|
||||
exit until all regular containers have terminated. Once all regular
|
||||
containers have completed, all init containers with restartPolicy "Always"
|
||||
@@ -1971,57 +1930,6 @@ spec:
|
||||
init container is started, or after any startupProbe has successfully
|
||||
completed.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. The rules are evaluated in
|
||||
order. Once a rule matches a container exit condition, the remaining
|
||||
rules are ignored. If no rule matches the container exit condition,
|
||||
the Container-level restart policy determines the whether the container
|
||||
is restarted or not. Constraints on the rules:
|
||||
- At most 20 rules are allowed.
|
||||
- Rules can have the same action.
|
||||
- Identical rules are not forbidden in validations.
|
||||
When rules are specified, container MUST set RestartPolicy explicitly
|
||||
even it if matches the Pod's RestartPolicy.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
SecurityContext defines the security options the container should be run with.
|
||||
@@ -2619,9 +2527,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -2675,42 +2581,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -2766,13 +2636,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -2792,9 +2662,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -3039,12 +2907,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: Probes are not allowed for ephemeral containers.
|
||||
@@ -3418,7 +3280,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -3470,51 +3332,9 @@ spec:
|
||||
description: |-
|
||||
Restart policy for the container to manage the restart behavior of each
|
||||
container within a pod.
|
||||
You cannot set this field on ephemeral containers.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. You cannot set this field on
|
||||
This may only be set for init containers. You cannot set this field on
|
||||
ephemeral containers.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
Optional: SecurityContext defines the security options the ephemeral container should be run with.
|
||||
@@ -4033,9 +3853,7 @@ spec:
|
||||
hostNetwork:
|
||||
description: |-
|
||||
Host networking requested for this pod. Use the host's network namespace.
|
||||
When using HostNetwork you should specify ports so the scheduler is aware.
|
||||
When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
|
||||
and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
|
||||
If this option is set, the ports that will be used must be specified.
|
||||
Default to false.
|
||||
type: boolean
|
||||
hostPID:
|
||||
@@ -4060,19 +3878,6 @@ spec:
|
||||
Specifies the hostname of the Pod
|
||||
If not specified, the pod's hostname will be set to a system-defined value.
|
||||
type: string
|
||||
hostnameOverride:
|
||||
description: |-
|
||||
HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
|
||||
This field only specifies the pod's hostname and does not affect its DNS records.
|
||||
When this field is set to a non-empty string:
|
||||
- It takes precedence over the values set in `hostname` and `subdomain`.
|
||||
- The Pod's hostname will be set to this value.
|
||||
- `setHostnameAsFQDN` must be nil or set to false.
|
||||
- `hostNetwork` must be set to false.
|
||||
|
||||
This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
|
||||
Requires the HostnameOverride feature gate to be enabled.
|
||||
type: string
|
||||
imagePullSecrets:
|
||||
description: |-
|
||||
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
|
||||
@@ -4108,7 +3913,7 @@ spec:
|
||||
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
|
||||
The resourceRequirements of an init container are taken into account during scheduling
|
||||
by finding the highest request/limit for each resource type, and then using the max of
|
||||
that value or the sum of the normal containers. Limits are applied to init containers
|
||||
of that value or the sum of the normal containers. Limits are applied to init containers
|
||||
in a similar fashion.
|
||||
Init containers cannot currently be added or removed.
|
||||
Cannot be updated.
|
||||
@@ -4152,9 +3957,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -4208,42 +4011,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -4299,13 +4066,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -4325,9 +4092,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -4576,12 +4341,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: |-
|
||||
@@ -4972,7 +4731,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -5023,10 +4782,10 @@ spec:
|
||||
restartPolicy:
|
||||
description: |-
|
||||
RestartPolicy defines the restart behavior of individual containers in a pod.
|
||||
This overrides the pod-level restart policy. When this field is not specified,
|
||||
This field may only be set for init containers, and the only allowed value is "Always".
|
||||
For non-init containers or when this field is not specified,
|
||||
the restart behavior is defined by the Pod's restart policy and the container type.
|
||||
Additionally, setting the RestartPolicy as "Always" for the init container will
|
||||
have the following effect:
|
||||
Setting the RestartPolicy as "Always" for the init container will have the following effect:
|
||||
this init container will be continually restarted on
|
||||
exit until all regular containers have terminated. Once all regular
|
||||
containers have completed, all init containers with restartPolicy "Always"
|
||||
@@ -5038,57 +4797,6 @@ spec:
|
||||
init container is started, or after any startupProbe has successfully
|
||||
completed.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. The rules are evaluated in
|
||||
order. Once a rule matches a container exit condition, the remaining
|
||||
rules are ignored. If no rule matches the container exit condition,
|
||||
the Container-level restart policy determines the whether the container
|
||||
is restarted or not. Constraints on the rules:
|
||||
- At most 20 rules are allowed.
|
||||
- Rules can have the same action.
|
||||
- Identical rules are not forbidden in validations.
|
||||
When rules are specified, container MUST set RestartPolicy explicitly
|
||||
even it if matches the Pod's RestartPolicy.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
SecurityContext defines the security options the container should be run with.
|
||||
@@ -5602,7 +5310,6 @@ spec:
|
||||
- spec.hostPID
|
||||
- spec.hostIPC
|
||||
- spec.hostUsers
|
||||
- spec.resources
|
||||
- spec.securityContext.appArmorProfile
|
||||
- spec.securityContext.seLinuxOptions
|
||||
- spec.securityContext.seccompProfile
|
||||
@@ -5754,7 +5461,7 @@ spec:
|
||||
description: |-
|
||||
Resources is the total amount of CPU and Memory resources required by all
|
||||
containers in the pod. It supports specifying Requests and Limits for
|
||||
"cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
|
||||
"cpu" and "memory" resource names only. ResourceClaims are not supported.
|
||||
|
||||
This field enables fine-grained control over resource allocation for the
|
||||
entire pod, allowing resource sharing among containers in a pod.
|
||||
@@ -5767,7 +5474,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -6295,6 +6002,7 @@ spec:
|
||||
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
|
||||
|
||||
If this value is nil, the behavior is equivalent to the Honor policy.
|
||||
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
|
||||
type: string
|
||||
nodeTaintsPolicy:
|
||||
description: |-
|
||||
@@ -6305,6 +6013,7 @@ spec:
|
||||
- Ignore: node taints are ignored. All nodes are included.
|
||||
|
||||
If this value is nil, the behavior is equivalent to the Ignore policy.
|
||||
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
|
||||
type: string
|
||||
topologyKey:
|
||||
description: |-
|
||||
@@ -7010,13 +6719,15 @@ spec:
|
||||
volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
|
||||
If specified, the CSI driver will create or update the volume with the attributes defined
|
||||
in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
|
||||
it can be changed after the claim is created. An empty string or nil value indicates that no
|
||||
VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
|
||||
this field can be reset to its previous value (including nil) to cancel the modification.
|
||||
it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
|
||||
will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
|
||||
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
|
||||
will be set by the persistentvolume controller if it exists.
|
||||
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
|
||||
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
|
||||
exists.
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
|
||||
(Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
|
||||
type: string
|
||||
volumeMode:
|
||||
description: |-
|
||||
@@ -7190,9 +6901,12 @@ spec:
|
||||
description: |-
|
||||
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
|
||||
Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
|
||||
More info: https://examples.k8s.io/volumes/glusterfs/README.md
|
||||
properties:
|
||||
endpoints:
|
||||
description: endpoints is the endpoint name that details Glusterfs topology.
|
||||
description: |-
|
||||
endpoints is the endpoint name that details Glusterfs topology.
|
||||
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
|
||||
type: string
|
||||
path:
|
||||
description: |-
|
||||
@@ -7246,7 +6960,7 @@ spec:
|
||||
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
|
||||
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
|
||||
The volume will be mounted read-only (ro) and non-executable files (noexec).
|
||||
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
|
||||
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
|
||||
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
|
||||
properties:
|
||||
pullPolicy:
|
||||
@@ -7271,7 +6985,7 @@ spec:
|
||||
description: |-
|
||||
iscsi represents an ISCSI Disk resource that is attached to a
|
||||
kubelet's host machine and then exposed to the pod.
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
|
||||
More info: https://examples.k8s.io/volumes/iscsi/README.md
|
||||
properties:
|
||||
chapAuthDiscovery:
|
||||
description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
|
||||
@@ -7661,110 +7375,6 @@ spec:
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
podCertificate:
|
||||
description: |-
|
||||
Projects an auto-rotating credential bundle (private key and certificate
|
||||
chain) that the pod can use either as a TLS client or server.
|
||||
|
||||
Kubelet generates a private key and uses it to send a
|
||||
PodCertificateRequest to the named signer. Once the signer approves the
|
||||
request and issues a certificate chain, Kubelet writes the key and
|
||||
certificate chain to the pod filesystem. The pod does not start until
|
||||
certificates have been issued for each podCertificate projected volume
|
||||
source in its spec.
|
||||
|
||||
Kubelet will begin trying to rotate the certificate at the time indicated
|
||||
by the signer using the PodCertificateRequest.Status.BeginRefreshAt
|
||||
timestamp.
|
||||
|
||||
Kubelet can write a single file, indicated by the credentialBundlePath
|
||||
field, or separate files, indicated by the keyPath and
|
||||
certificateChainPath fields.
|
||||
|
||||
The credential bundle is a single file in PEM format. The first PEM
|
||||
entry is the private key (in PKCS#8 format), and the remaining PEM
|
||||
entries are the certificate chain issued by the signer (typically,
|
||||
signers will return their certificate chain in leaf-to-root order).
|
||||
|
||||
Prefer using the credential bundle format, since your application code
|
||||
can read it atomically. If you use keyPath and certificateChainPath,
|
||||
your application must make two separate file reads. If these coincide
|
||||
with a certificate rotation, it is possible that the private key and leaf
|
||||
certificate you read may not correspond to each other. Your application
|
||||
will need to check for this condition, and re-read until they are
|
||||
consistent.
|
||||
|
||||
The named signer controls chooses the format of the certificate it
|
||||
issues; consult the signer implementation's documentation to learn how to
|
||||
use the certificates it issues.
|
||||
properties:
|
||||
certificateChainPath:
|
||||
description: |-
|
||||
Write the certificate chain at this path in the projected volume.
|
||||
|
||||
Most applications should use credentialBundlePath. When using keyPath
|
||||
and certificateChainPath, your application needs to check that the key
|
||||
and leaf certificate are consistent, because it is possible to read the
|
||||
files mid-rotation.
|
||||
type: string
|
||||
credentialBundlePath:
|
||||
description: |-
|
||||
Write the credential bundle at this path in the projected volume.
|
||||
|
||||
The credential bundle is a single file that contains multiple PEM blocks.
|
||||
The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
|
||||
key.
|
||||
|
||||
The remaining blocks are CERTIFICATE blocks, containing the issued
|
||||
certificate chain from the signer (leaf and any intermediates).
|
||||
|
||||
Using credentialBundlePath lets your Pod's application code make a single
|
||||
atomic read that retrieves a consistent key and certificate chain. If you
|
||||
project them to separate files, your application code will need to
|
||||
additionally check that the leaf certificate was issued to the key.
|
||||
type: string
|
||||
keyPath:
|
||||
description: |-
|
||||
Write the key at this path in the projected volume.
|
||||
|
||||
Most applications should use credentialBundlePath. When using keyPath
|
||||
and certificateChainPath, your application needs to check that the key
|
||||
and leaf certificate are consistent, because it is possible to read the
|
||||
files mid-rotation.
|
||||
type: string
|
||||
keyType:
|
||||
description: |-
|
||||
The type of keypair Kubelet will generate for the pod.
|
||||
|
||||
Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
|
||||
"ECDSAP521", and "ED25519".
|
||||
type: string
|
||||
maxExpirationSeconds:
|
||||
description: |-
|
||||
maxExpirationSeconds is the maximum lifetime permitted for the
|
||||
certificate.
|
||||
|
||||
Kubelet copies this value verbatim into the PodCertificateRequests it
|
||||
generates for this projection.
|
||||
|
||||
If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver
|
||||
will reject values shorter than 3600 (1 hour). The maximum allowable
|
||||
value is 7862400 (91 days).
|
||||
|
||||
The signer implementation is then free to issue a certificate with any
|
||||
lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
|
||||
seconds (1 hour). This constraint is enforced by kube-apiserver.
|
||||
`kubernetes.io` signers will never issue certificates with a lifetime
|
||||
longer than 24 hours.
|
||||
format: int32
|
||||
type: integer
|
||||
signerName:
|
||||
description: Kubelet's generated CSRs will be addressed to this signer.
|
||||
type: string
|
||||
required:
|
||||
- keyType
|
||||
- signerName
|
||||
type: object
|
||||
secret:
|
||||
description: secret information about the secret data to project
|
||||
properties:
|
||||
@@ -7894,6 +7504,7 @@ spec:
|
||||
description: |-
|
||||
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
|
||||
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
|
||||
More info: https://examples.k8s.io/volumes/rbd/README.md
|
||||
properties:
|
||||
fsType:
|
||||
description: |-
|
||||
@@ -8173,53 +7784,6 @@ spec:
|
||||
required:
|
||||
- containers
|
||||
type: object
|
||||
vaultConfig:
|
||||
properties:
|
||||
azureKeyVault:
|
||||
properties:
|
||||
certificatePath:
|
||||
type: string
|
||||
clientId:
|
||||
type: string
|
||||
tenantId:
|
||||
type: string
|
||||
url:
|
||||
type: string
|
||||
required:
|
||||
- certificatePath
|
||||
- clientId
|
||||
- tenantId
|
||||
- url
|
||||
type: object
|
||||
proxy:
|
||||
properties:
|
||||
http:
|
||||
properties:
|
||||
credentialSecretRef:
|
||||
type: string
|
||||
url:
|
||||
description: Required
|
||||
type: string
|
||||
type: object
|
||||
https:
|
||||
properties:
|
||||
credentialSecretRef:
|
||||
type: string
|
||||
url:
|
||||
description: Required
|
||||
type: string
|
||||
type: object
|
||||
noProxy:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
type:
|
||||
description: |-
|
||||
VaultType represents the type of vault that can be used in the application.
|
||||
It is used to identify which vault integration should be used to resolve secrets.
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- githubConfigSecret
|
||||
- githubConfigUrl
|
||||
@@ -8230,13 +7794,10 @@ spec:
|
||||
properties:
|
||||
failures:
|
||||
additionalProperties:
|
||||
format: date-time
|
||||
type: string
|
||||
type: boolean
|
||||
type: object
|
||||
jobDisplayName:
|
||||
type: string
|
||||
jobId:
|
||||
type: string
|
||||
jobRepositoryName:
|
||||
type: string
|
||||
jobRequestId:
|
||||
@@ -8265,6 +7826,8 @@ spec:
|
||||
type: string
|
||||
runnerId:
|
||||
type: integer
|
||||
runnerJITConfig:
|
||||
type: string
|
||||
runnerName:
|
||||
type: string
|
||||
workflowRunId:
|
||||
@@ -8276,3 +7839,4 @@ spec:
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
preserveUnknownFields: false
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.19.0
|
||||
controller-gen.kubebuilder.io/version: v0.17.2
|
||||
name: ephemeralrunnersets.actions.github.com
|
||||
spec:
|
||||
group: actions.github.com
|
||||
@@ -421,6 +421,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -435,6 +436,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -595,6 +597,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -609,6 +612,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -697,8 +701,8 @@ spec:
|
||||
most preferred is the one with the greatest sum of weights, i.e.
|
||||
for each node that meets all of the scheduling requirements (resource
|
||||
request, requiredDuringScheduling anti-affinity expressions, etc.),
|
||||
compute a sum by iterating through the elements of this field and subtracting
|
||||
"weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
compute a sum by iterating through the elements of this field and adding
|
||||
"weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
node(s) with the highest sum are the most preferred.
|
||||
items:
|
||||
description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
|
||||
@@ -762,6 +766,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -776,6 +781,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -936,6 +942,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -950,6 +957,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -1076,9 +1084,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -1132,42 +1138,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -1223,13 +1193,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -1249,9 +1219,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -1500,12 +1468,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: |-
|
||||
@@ -1896,7 +1858,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -1947,10 +1909,10 @@ spec:
|
||||
restartPolicy:
|
||||
description: |-
|
||||
RestartPolicy defines the restart behavior of individual containers in a pod.
|
||||
This overrides the pod-level restart policy. When this field is not specified,
|
||||
This field may only be set for init containers, and the only allowed value is "Always".
|
||||
For non-init containers or when this field is not specified,
|
||||
the restart behavior is defined by the Pod's restart policy and the container type.
|
||||
Additionally, setting the RestartPolicy as "Always" for the init container will
|
||||
have the following effect:
|
||||
Setting the RestartPolicy as "Always" for the init container will have the following effect:
|
||||
this init container will be continually restarted on
|
||||
exit until all regular containers have terminated. Once all regular
|
||||
containers have completed, all init containers with restartPolicy "Always"
|
||||
@@ -1962,57 +1924,6 @@ spec:
|
||||
init container is started, or after any startupProbe has successfully
|
||||
completed.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. The rules are evaluated in
|
||||
order. Once a rule matches a container exit condition, the remaining
|
||||
rules are ignored. If no rule matches the container exit condition,
|
||||
the Container-level restart policy determines the whether the container
|
||||
is restarted or not. Constraints on the rules:
|
||||
- At most 20 rules are allowed.
|
||||
- Rules can have the same action.
|
||||
- Identical rules are not forbidden in validations.
|
||||
When rules are specified, container MUST set RestartPolicy explicitly
|
||||
even it if matches the Pod's RestartPolicy.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
SecurityContext defines the security options the container should be run with.
|
||||
@@ -2610,9 +2521,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -2666,42 +2575,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -2757,13 +2630,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -2783,9 +2656,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -3030,12 +2901,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: Probes are not allowed for ephemeral containers.
|
||||
@@ -3409,7 +3274,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -3461,51 +3326,9 @@ spec:
|
||||
description: |-
|
||||
Restart policy for the container to manage the restart behavior of each
|
||||
container within a pod.
|
||||
You cannot set this field on ephemeral containers.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. You cannot set this field on
|
||||
This may only be set for init containers. You cannot set this field on
|
||||
ephemeral containers.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
Optional: SecurityContext defines the security options the ephemeral container should be run with.
|
||||
@@ -4024,9 +3847,7 @@ spec:
|
||||
hostNetwork:
|
||||
description: |-
|
||||
Host networking requested for this pod. Use the host's network namespace.
|
||||
When using HostNetwork you should specify ports so the scheduler is aware.
|
||||
When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
|
||||
and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
|
||||
If this option is set, the ports that will be used must be specified.
|
||||
Default to false.
|
||||
type: boolean
|
||||
hostPID:
|
||||
@@ -4051,19 +3872,6 @@ spec:
|
||||
Specifies the hostname of the Pod
|
||||
If not specified, the pod's hostname will be set to a system-defined value.
|
||||
type: string
|
||||
hostnameOverride:
|
||||
description: |-
|
||||
HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
|
||||
This field only specifies the pod's hostname and does not affect its DNS records.
|
||||
When this field is set to a non-empty string:
|
||||
- It takes precedence over the values set in `hostname` and `subdomain`.
|
||||
- The Pod's hostname will be set to this value.
|
||||
- `setHostnameAsFQDN` must be nil or set to false.
|
||||
- `hostNetwork` must be set to false.
|
||||
|
||||
This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
|
||||
Requires the HostnameOverride feature gate to be enabled.
|
||||
type: string
|
||||
imagePullSecrets:
|
||||
description: |-
|
||||
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
|
||||
@@ -4099,7 +3907,7 @@ spec:
|
||||
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
|
||||
The resourceRequirements of an init container are taken into account during scheduling
|
||||
by finding the highest request/limit for each resource type, and then using the max of
|
||||
that value or the sum of the normal containers. Limits are applied to init containers
|
||||
of that value or the sum of the normal containers. Limits are applied to init containers
|
||||
in a similar fashion.
|
||||
Init containers cannot currently be added or removed.
|
||||
Cannot be updated.
|
||||
@@ -4143,9 +3951,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -4199,42 +4005,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.

If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
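A minimal sketch of an environment variable sourced via `fileKeyRef`, assuming the EnvFiles feature gate is enabled; the variable, volume, path, and key names are illustrative.

```yaml
# Illustrative container env entry using the fileKeyRef source described above.
env:
  - name: RUNNER_EXTRA_FLAG            # hypothetical variable name
    valueFrom:
      fileKeyRef:
        volumeName: env-file-volume    # volume mount containing the env file
        path: runner.env               # relative path within the volume
        key: RUNNER_EXTRA_FLAG         # key inside the env file
        optional: false                # pod creation fails if the key is missing
```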
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -4290,13 +4060,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -4316,9 +4086,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -4567,12 +4335,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: |-
|
||||
@@ -4963,7 +4725,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -5014,10 +4776,10 @@ spec:
|
||||
restartPolicy:
|
||||
description: |-
|
||||
RestartPolicy defines the restart behavior of individual containers in a pod.
|
||||
This overrides the pod-level restart policy. When this field is not specified,
|
||||
This field may only be set for init containers, and the only allowed value is "Always".
|
||||
For non-init containers or when this field is not specified,
|
||||
the restart behavior is defined by the Pod's restart policy and the container type.
|
||||
Additionally, setting the RestartPolicy as "Always" for the init container will
|
||||
have the following effect:
|
||||
Setting the RestartPolicy as "Always" for the init container will have the following effect:
|
||||
this init container will be continually restarted on
|
||||
exit until all regular containers have terminated. Once all regular
|
||||
containers have completed, all init containers with restartPolicy "Always"
|
||||
@@ -5029,57 +4791,6 @@ spec:
|
||||
init container is started, or after any startupProbe has successfully
|
||||
completed.
|
||||
type: string
|
||||
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. The rules are evaluated in
order. Once a rule matches a container exit condition, the remaining
rules are ignored. If no rule matches the container exit condition,
the Container-level restart policy determines the whether the container
is restarted or not. Constraints on the rules:
- At most 20 rules are allowed.
- Rules can have the same action.
- Identical rules are not forbidden in validations.
When rules are specified, container MUST set RestartPolicy explicitly
even it if matches the Pod's RestartPolicy.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
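A minimal sketch of a container using these restart rules, based only on the schema above; the exit codes and the container-level restart policy value are illustrative, and the policy must be set explicitly whenever rules are present.

```yaml
# Illustrative container fragment using restartPolicyRules as described above.
restartPolicy: Never          # must be set explicitly when rules are specified
restartPolicyRules:
  - action: Restart           # the only possible action
    exitCodes:
      operator: In            # or NotIn
      values: [1, 42]         # int32 set, at most 255 elements
```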
|
||||
securityContext:
|
||||
description: |-
|
||||
SecurityContext defines the security options the container should be run with.
|
||||
@@ -5593,7 +5304,6 @@ spec:
|
||||
- spec.hostPID
|
||||
- spec.hostIPC
|
||||
- spec.hostUsers
|
||||
- spec.resources
|
||||
- spec.securityContext.appArmorProfile
|
||||
- spec.securityContext.seLinuxOptions
|
||||
- spec.securityContext.seccompProfile
|
||||
@@ -5745,7 +5455,7 @@ spec:
|
||||
description: |-
|
||||
Resources is the total amount of CPU and Memory resources required by all
|
||||
containers in the pod. It supports specifying Requests and Limits for
|
||||
"cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
|
||||
"cpu" and "memory" resource names only. ResourceClaims are not supported.
|
||||
|
||||
This field enables fine-grained control over resource allocation for the
|
||||
entire pod, allowing resource sharing among containers in a pod.
|
||||
@@ -5758,7 +5468,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -6286,6 +5996,7 @@ spec:
|
||||
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
|
||||
|
||||
If this value is nil, the behavior is equivalent to the Honor policy.
|
||||
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
|
||||
type: string
|
||||
nodeTaintsPolicy:
|
||||
description: |-
|
||||
@@ -6296,6 +6007,7 @@ spec:
|
||||
- Ignore: node taints are ignored. All nodes are included.
|
||||
|
||||
If this value is nil, the behavior is equivalent to the Ignore policy.
|
||||
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
|
||||
type: string
|
||||
topologyKey:
|
||||
description: |-
|
||||
@@ -7001,13 +6713,15 @@ spec:
|
||||
volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
|
||||
If specified, the CSI driver will create or update the volume with the attributes defined
|
||||
in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
|
||||
it can be changed after the claim is created. An empty string or nil value indicates that no
|
||||
VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
|
||||
this field can be reset to its previous value (including nil) to cancel the modification.
|
||||
it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
|
||||
will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
|
||||
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
|
||||
will be set by the persistentvolume controller if it exists.
|
||||
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
|
||||
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
|
||||
exists.
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
|
||||
(Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
|
||||
type: string
|
||||
volumeMode:
|
||||
description: |-
|
||||
@@ -7181,9 +6895,12 @@ spec:
|
||||
description: |-
|
||||
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
|
||||
Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
|
||||
More info: https://examples.k8s.io/volumes/glusterfs/README.md
|
||||
properties:
|
||||
endpoints:
|
||||
description: endpoints is the endpoint name that details Glusterfs topology.
|
||||
description: |-
|
||||
endpoints is the endpoint name that details Glusterfs topology.
|
||||
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
|
||||
type: string
|
||||
path:
|
||||
description: |-
|
||||
@@ -7237,7 +6954,7 @@ spec:
|
||||
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
|
||||
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
|
||||
The volume will be mounted read-only (ro) and non-executable files (noexec).
|
||||
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
|
||||
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
|
||||
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
|
||||
properties:
|
||||
pullPolicy:
|
||||
@@ -7262,7 +6979,7 @@ spec:
|
||||
description: |-
|
||||
iscsi represents an ISCSI Disk resource that is attached to a
|
||||
kubelet's host machine and then exposed to the pod.
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
|
||||
More info: https://examples.k8s.io/volumes/iscsi/README.md
|
||||
properties:
|
||||
chapAuthDiscovery:
|
||||
description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
|
||||
@@ -7652,110 +7369,6 @@ spec:
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
podCertificate:
|
||||
description: |-
|
||||
Projects an auto-rotating credential bundle (private key and certificate
|
||||
chain) that the pod can use either as a TLS client or server.
|
||||
|
||||
Kubelet generates a private key and uses it to send a
|
||||
PodCertificateRequest to the named signer. Once the signer approves the
|
||||
request and issues a certificate chain, Kubelet writes the key and
|
||||
certificate chain to the pod filesystem. The pod does not start until
|
||||
certificates have been issued for each podCertificate projected volume
|
||||
source in its spec.
|
||||
|
||||
Kubelet will begin trying to rotate the certificate at the time indicated
|
||||
by the signer using the PodCertificateRequest.Status.BeginRefreshAt
|
||||
timestamp.
|
||||
|
||||
Kubelet can write a single file, indicated by the credentialBundlePath
|
||||
field, or separate files, indicated by the keyPath and
|
||||
certificateChainPath fields.
|
||||
|
||||
The credential bundle is a single file in PEM format. The first PEM
|
||||
entry is the private key (in PKCS#8 format), and the remaining PEM
|
||||
entries are the certificate chain issued by the signer (typically,
|
||||
signers will return their certificate chain in leaf-to-root order).
|
||||
|
||||
Prefer using the credential bundle format, since your application code
|
||||
can read it atomically. If you use keyPath and certificateChainPath,
|
||||
your application must make two separate file reads. If these coincide
|
||||
with a certificate rotation, it is possible that the private key and leaf
|
||||
certificate you read may not correspond to each other. Your application
|
||||
will need to check for this condition, and re-read until they are
|
||||
consistent.
|
||||
|
||||
The named signer controls chooses the format of the certificate it
|
||||
issues; consult the signer implementation's documentation to learn how to
|
||||
use the certificates it issues.
|
||||
properties:
|
||||
certificateChainPath:
|
||||
description: |-
|
||||
Write the certificate chain at this path in the projected volume.
|
||||
|
||||
Most applications should use credentialBundlePath. When using keyPath
|
||||
and certificateChainPath, your application needs to check that the key
|
||||
and leaf certificate are consistent, because it is possible to read the
|
||||
files mid-rotation.
|
||||
type: string
|
||||
credentialBundlePath:
|
||||
description: |-
|
||||
Write the credential bundle at this path in the projected volume.
|
||||
|
||||
The credential bundle is a single file that contains multiple PEM blocks.
|
||||
The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
|
||||
key.
|
||||
|
||||
The remaining blocks are CERTIFICATE blocks, containing the issued
|
||||
certificate chain from the signer (leaf and any intermediates).
|
||||
|
||||
Using credentialBundlePath lets your Pod's application code make a single
|
||||
atomic read that retrieves a consistent key and certificate chain. If you
|
||||
project them to separate files, your application code will need to
|
||||
additionally check that the leaf certificate was issued to the key.
|
||||
type: string
|
||||
keyPath:
|
||||
description: |-
|
||||
Write the key at this path in the projected volume.
|
||||
|
||||
Most applications should use credentialBundlePath. When using keyPath
|
||||
and certificateChainPath, your application needs to check that the key
|
||||
and leaf certificate are consistent, because it is possible to read the
|
||||
files mid-rotation.
|
||||
type: string
|
||||
keyType:
|
||||
description: |-
|
||||
The type of keypair Kubelet will generate for the pod.
|
||||
|
||||
Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
|
||||
"ECDSAP521", and "ED25519".
|
||||
type: string
|
||||
maxExpirationSeconds:
description: |-
maxExpirationSeconds is the maximum lifetime permitted for the
certificate.

Kubelet copies this value verbatim into the PodCertificateRequests it
generates for this projection.

If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver
will reject values shorter than 3600 (1 hour). The maximum allowable
value is 7862400 (91 days).

The signer implementation is then free to issue a certificate with any
lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
seconds (1 hour). This constraint is enforced by kube-apiserver.
`kubernetes.io` signers will never issue certificates with a lifetime
longer than 24 hours.
format: int32
type: integer
signerName:
description: Kubelet's generated CSRs will be addressed to this signer.
type: string
required:
- keyType
- signerName
type: object
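A minimal sketch of a projected volume using the `podCertificate` source described above; the volume name, signer name, and bundle path are placeholders.

```yaml
# Illustrative projected volume using the podCertificate source described above.
volumes:
  - name: workload-certs                       # hypothetical volume name
    projected:
      sources:
        - podCertificate:
            signerName: example.com/my-signer  # placeholder signer
            keyType: ECDSAP256
            credentialBundlePath: credbundle.pem
            maxExpirationSeconds: 86400        # default applied by kube-apiserver when omitted
```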
|
||||
secret:
|
||||
description: secret information about the secret data to project
|
||||
properties:
|
||||
@@ -7885,6 +7498,7 @@ spec:
|
||||
description: |-
|
||||
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
|
||||
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
|
||||
More info: https://examples.k8s.io/volumes/rbd/README.md
|
||||
properties:
|
||||
fsType:
|
||||
description: |-
|
||||
@@ -8164,53 +7778,6 @@ spec:
|
||||
required:
|
||||
- containers
|
||||
type: object
|
||||
vaultConfig:
|
||||
properties:
|
||||
azureKeyVault:
|
||||
properties:
|
||||
certificatePath:
|
||||
type: string
|
||||
clientId:
|
||||
type: string
|
||||
tenantId:
|
||||
type: string
|
||||
url:
|
||||
type: string
|
||||
required:
|
||||
- certificatePath
|
||||
- clientId
|
||||
- tenantId
|
||||
- url
|
||||
type: object
|
||||
proxy:
|
||||
properties:
|
||||
http:
|
||||
properties:
|
||||
credentialSecretRef:
|
||||
type: string
|
||||
url:
|
||||
description: Required
|
||||
type: string
|
||||
type: object
|
||||
https:
|
||||
properties:
|
||||
credentialSecretRef:
|
||||
type: string
|
||||
url:
|
||||
description: Required
|
||||
type: string
|
||||
type: object
|
||||
noProxy:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
type:
|
||||
description: |-
|
||||
VaultType represents the type of vault that can be used in the application.
|
||||
It is used to identify which vault integration should be used to resolve secrets.
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- githubConfigSecret
|
||||
- githubConfigUrl
|
||||
@@ -8245,3 +7812,4 @@ spec:
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
preserveUnknownFields: false
|
||||
|
||||
@@ -129,3 +129,11 @@ Create the name of the service account to use
{{- define "gha-runner-scale-set-controller.leaderElectionRoleBinding" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election
{{- end }}

{{- define "gha-runner-scale-set-controller.imagePullSecretsNames" -}}
{{- $names := list }}
{{- range $k, $v := . }}
{{- $names = append $names $v.name }}
{{- end }}
{{- $names | join ","}}
{{- end }}

@@ -54,9 +54,7 @@ spec:
- "--leader-election-id={{ include "gha-runner-scale-set-controller.fullname" . }}"
{{- end }}
{{- with .Values.imagePullSecrets }}
{{- range . }}
- "--auto-scaler-image-pull-secrets={{- .name -}}"
{{- end }}
- "--auto-scaler-image-pull-secrets={{ include "gha-runner-scale-set-controller.imagePullSecretsNames" . }}"
{{- end }}
{{- with .Values.flags.logLevel }}
- "--log-level={{ . }}"
|
||||
|
||||
@@ -683,8 +683,7 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) {

expectedArgs := []string{
"--auto-scaling-runner-set-only",
"--auto-scaler-image-pull-secrets=dockerhub",
"--auto-scaler-image-pull-secrets=ghcr",
"--auto-scaler-image-pull-secrets=dockerhub,ghcr",
"--log-level=debug",
"--log-format=text",
"--update-strategy=immediate",
@@ -1080,7 +1079,6 @@ func TestDeployment_excludeLabelPropagationPrefixes(t *testing.T) {
assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=prefix.com/")
assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=complete.io/label")
}

func TestNamespaceOverride(t *testing.T) {
t.Parallel()

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.13.0
version: 0.11.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.13.0"
appVersion: "0.11.0"

home: https://github.com/actions/actions-runner-controller

@@ -62,12 +62,12 @@ app.kubernetes.io/instance: {{ include "gha-runner-scale-set.scale-set-name" . }
{{- fail "Values.githubConfigSecret is required for setting auth with GitHub server." }}
{{- end }}
{{- else }}
{{- include "gha-runner-scale-set.fullname" . | replace "_" "-" }}-github-secret
{{- include "gha-runner-scale-set.fullname" . }}-github-secret
{{- end }}
{{- end }}

{{- define "gha-runner-scale-set.noPermissionServiceAccountName" -}}
{{- include "gha-runner-scale-set.fullname" . | replace "_" "-" }}-no-permission
{{- include "gha-runner-scale-set.fullname" . }}-no-permission
{{- end }}

{{- define "gha-runner-scale-set.kubeModeRoleName" -}}
@@ -79,7 +79,7 @@ app.kubernetes.io/instance: {{ include "gha-runner-scale-set.scale-set-name" . }
{{- end }}

{{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}}
{{- include "gha-runner-scale-set.fullname" . | replace "_" "-" }}-kube-mode
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode
{{- end }}

{{- define "gha-runner-scale-set.dind-init-container" -}}
|
||||
@@ -106,17 +106,6 @@ env:
|
||||
value: "123"
|
||||
securityContext:
|
||||
privileged: true
|
||||
{{- if (ge (.Capabilities.KubeVersion.Minor | int) 29) }}
|
||||
restartPolicy: Always
|
||||
startupProbe:
|
||||
exec:
|
||||
command:
|
||||
- docker
|
||||
- info
|
||||
initialDelaySeconds: 0
|
||||
failureThreshold: 24
|
||||
periodSeconds: 5
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: work
|
||||
mountPath: /home/runner/_work
|
||||
@@ -377,101 +366,6 @@ volumeMounts:
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.kubernetes-novolume-mode-runner-container" -}}
|
||||
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
|
||||
{{- range $i, $container := .Values.template.spec.containers }}
|
||||
{{- if eq $container.name "runner" }}
|
||||
{{- $setRunnerImage := "" }}
|
||||
{{- range $key, $val := $container }}
|
||||
{{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
|
||||
{{- if eq $key "image" }}
|
||||
{{- $setRunnerImage = $val }}
|
||||
{{- end }}
|
||||
{{ $key }}: {{ $val | toYaml | nindent 2 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- $setContainerHooks := 1 }}
|
||||
{{- $setPodName := 1 }}
|
||||
{{- $setRequireJobContainer := 1 }}
|
||||
{{- $setActionsRunnerImage := 1 }}
|
||||
{{- $setNodeExtraCaCerts := 0 }}
|
||||
{{- $setRunnerUpdateCaCerts := 0 }}
|
||||
{{- if $tlsConfig.runnerMountPath }}
|
||||
{{- $setNodeExtraCaCerts = 1 }}
|
||||
{{- $setRunnerUpdateCaCerts = 1 }}
|
||||
{{- end }}
|
||||
env:
|
||||
{{- with $container.env }}
|
||||
{{- range $i, $env := . }}
|
||||
{{- if eq $env.name "ACTIONS_RUNNER_CONTAINER_HOOKS" }}
|
||||
{{- $setContainerHooks = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "ACTIONS_RUNNER_IMAGE" }}
|
||||
{{- $setActionsRunnerImage = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "ACTIONS_RUNNER_POD_NAME" }}
|
||||
{{- $setPodName = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER" }}
|
||||
{{- $setRequireJobContainer = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "NODE_EXTRA_CA_CERTS" }}
|
||||
{{- $setNodeExtraCaCerts = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
|
||||
{{- $setRunnerUpdateCaCerts = 0 }}
|
||||
{{- end }}
|
||||
- {{ $env | toYaml | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $setContainerHooks }}
|
||||
- name: ACTIONS_RUNNER_CONTAINER_HOOKS
|
||||
value: /home/runner/k8s-novolume/index.js
|
||||
{{- end }}
|
||||
{{- if $setPodName }}
|
||||
- name: ACTIONS_RUNNER_POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
{{- end }}
|
||||
{{- if $setRequireJobContainer }}
|
||||
- name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER
|
||||
value: "true"
|
||||
{{- end }}
|
||||
{{- if $setActionsRunnerImage }}
|
||||
- name: ACTIONS_RUNNER_IMAGE
|
||||
value: "{{- $setRunnerImage -}}"
|
||||
{{- end }}
|
||||
{{- if $setNodeExtraCaCerts }}
|
||||
- name: NODE_EXTRA_CA_CERTS
|
||||
value: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
|
||||
{{- end }}
|
||||
{{- if $setRunnerUpdateCaCerts }}
|
||||
- name: RUNNER_UPDATE_CA_CERTS
|
||||
value: "1"
|
||||
{{- end }}
|
||||
{{- $mountGitHubServerTLS := 0 }}
|
||||
{{- if $tlsConfig.runnerMountPath }}
|
||||
{{- $mountGitHubServerTLS = 1 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
{{- with $container.volumeMounts }}
|
||||
{{- range $i, $volMount := . }}
|
||||
{{- if eq $volMount.name "github-server-tls-cert" }}
|
||||
{{- $mountGitHubServerTLS = 0 }}
|
||||
{{- end }}
|
||||
- {{ $volMount | toYaml | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $mountGitHubServerTLS }}
|
||||
- name: github-server-tls-cert
|
||||
mountPath: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
|
||||
subPath: {{ $tlsConfig.certificateFrom.configMapKeyRef.key }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.default-mode-runner-containers" -}}
|
||||
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
|
||||
{{- range $i, $container := .Values.template.spec.containers }}
|
||||
|
||||
@@ -8,45 +8,26 @@ metadata:
|
||||
{{- if gt (len (include "gha-runner-scale-set.namespace" .)) 63 }}
|
||||
{{ fail "Namespace must have up to 63 characters" }}
|
||||
{{- end }}
|
||||
name: {{ include "gha-runner-scale-set.scale-set-name" . | replace "_" "-" }}
|
||||
name: {{ include "gha-runner-scale-set.scale-set-name" . }}
|
||||
namespace: {{ include "gha-runner-scale-set.namespace" . }}
|
||||
labels:
|
||||
{{- $base := include "gha-runner-scale-set.labels" . | fromYaml }}
|
||||
{{- $extra := dict "app.kubernetes.io/component" "" }}
|
||||
{{- $reserved := merge $base $extra }}
|
||||
{{- with .Values.labels }}
|
||||
{{- range $k, $v := . }}
|
||||
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
|
||||
{{ $k }}: {{ $v | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $hasCustomResourceMeta }}
|
||||
{{- with .Values.resourceMeta.autoscalingRunnerSet.labels }}
|
||||
{{- range $k, $v := . }}
|
||||
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
|
||||
{{ $k }}: {{ $v | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/component: "autoscaling-runner-set"
|
||||
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- with .Values.annotations }}
|
||||
{{- range $k, $v := . }}
|
||||
{{- if not (or (hasPrefix "actions.github.com/cleanup-" $k) (eq $k "actions.github.com/values-hash")) }}
|
||||
{{ $k }}: {{ $v | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $hasCustomResourceMeta }}
|
||||
{{- with .Values.resourceMeta.autoscalingRunnerSet.annotations }}
|
||||
{{- range $k, $v := . }}
|
||||
{{- if not (or (hasPrefix "actions.github.com/cleanup-" $k) (eq $k "actions.github.com/values-hash")) }}
|
||||
{{ $k }}: {{ $v | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
actions.github.com/values-hash: {{ toJson .Values | sha256sum | trunc 63 }}
|
||||
@@ -56,15 +37,14 @@ metadata:
|
||||
{{- end }}
|
||||
actions.github.com/cleanup-manager-role-binding: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
|
||||
actions.github.com/cleanup-manager-role-name: {{ include "gha-runner-scale-set.managerRoleName" . }}
|
||||
{{- if and (or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume")) (not .Values.template.spec.serviceAccountName) }}
|
||||
{{- if and $containerMode (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
|
||||
actions.github.com/cleanup-kubernetes-mode-role-binding-name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
|
||||
actions.github.com/cleanup-kubernetes-mode-role-name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
|
||||
actions.github.com/cleanup-kubernetes-mode-service-account-name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
|
||||
{{- end }}
|
||||
{{- if and (ne $containerMode.type "kubernetes") (ne $containerMode.type "kubernetes-novolume") (not .Values.template.spec.serviceAccountName) }}
|
||||
{{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
|
||||
actions.github.com/cleanup-no-permission-service-account-name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }}
|
||||
{{- end }}
|
||||
|
||||
spec:
|
||||
githubConfigUrl: {{ required ".Values.githubConfigUrl is required" (trimSuffix "/" .Values.githubConfigUrl) }}
|
||||
githubConfigSecret: {{ include "gha-runner-scale-set.githubsecret" . }}
|
||||
@@ -85,24 +65,6 @@ spec:
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- if and .Values.keyVault .Values.keyVault.type }}
|
||||
vaultConfig:
|
||||
type: {{ .Values.keyVault.type }}
|
||||
{{- if .Values.keyVault.proxy }}
|
||||
proxy: {{- toYaml .Values.keyVault.proxy | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if eq .Values.keyVault.type "azure_key_vault" }}
|
||||
azureKeyVault:
|
||||
url: {{ .Values.keyVault.azureKeyVault.url }}
|
||||
tenantId: {{ .Values.keyVault.azureKeyVault.tenantId }}
|
||||
clientId: {{ .Values.keyVault.azureKeyVault.clientId }}
|
||||
certificatePath: {{ .Values.keyVault.azureKeyVault.certificatePath }}
|
||||
secretKey: {{ .Values.keyVault.azureKeyVault.secretKey }}
|
||||
{{- else }}
|
||||
{{- fail "Unsupported keyVault type: " .Values.keyVault.type }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.proxy }}
|
||||
proxy:
|
||||
{{- if .Values.proxy.http }}
|
||||
@@ -176,7 +138,7 @@ spec:
|
||||
restartPolicy: Never
|
||||
{{- end }}
|
||||
{{- $containerMode := .Values.containerMode }}
|
||||
{{- if or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume") }}
|
||||
{{- if eq $containerMode.type "kubernetes" }}
|
||||
serviceAccountName: {{ default (include "gha-runner-scale-set.kubeModeServiceAccountName" .) .Values.template.spec.serviceAccountName }}
|
||||
{{- else }}
|
||||
serviceAccountName: {{ default (include "gha-runner-scale-set.noPermissionServiceAccountName" .) .Values.template.spec.serviceAccountName }}
|
||||
@@ -186,10 +148,6 @@ spec:
|
||||
{{- if eq $containerMode.type "dind" }}
|
||||
- name: init-dind-externals
|
||||
{{- include "gha-runner-scale-set.dind-init-container" . | nindent 8 }}
|
||||
{{- if (ge (.Capabilities.KubeVersion.Minor | int) 29) }}
|
||||
- name: dind
|
||||
{{- include "gha-runner-scale-set.dind-container" . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.template.spec.initContainers }}
|
||||
{{- toYaml . | nindent 6 }}
|
||||
@@ -199,24 +157,18 @@ spec:
{{- if eq $containerMode.type "dind" }}
- name: runner
{{- include "gha-runner-scale-set.dind-runner-container" . | nindent 8 }}
{{- if not (ge (.Capabilities.KubeVersion.Minor | int) 29) }}
- name: dind
{{- include "gha-runner-scale-set.dind-container" . | nindent 8 }}
{{- end }}
{{- include "gha-runner-scale-set.non-runner-non-dind-containers" . | nindent 6 }}
{{- else if eq $containerMode.type "kubernetes" }}
- name: runner
{{- include "gha-runner-scale-set.kubernetes-mode-runner-container" . | nindent 8 }}
{{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }}
{{- else if eq $containerMode.type "kubernetes-novolume" }}
- name: runner
{{- include "gha-runner-scale-set.kubernetes-novolume-mode-runner-container" . | nindent 8 }}
{{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }}
{{- else }}
{{- include "gha-runner-scale-set.default-mode-runner-containers" . | nindent 6 }}
{{- end }}
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
{{- if or .Values.template.spec.volumes (eq $containerMode.type "dind") (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume") $tlsConfig.runnerMountPath }}
{{- if or .Values.template.spec.volumes (eq $containerMode.type "dind") (eq $containerMode.type "kubernetes") $tlsConfig.runnerMountPath }}
volumes:
{{- if $tlsConfig.runnerMountPath }}
{{- include "gha-runner-scale-set.tls-volume" $tlsConfig | nindent 6 }}
|
||||
|
||||
@@ -6,15 +6,8 @@ metadata:
|
||||
name: {{ include "gha-runner-scale-set.githubsecret" . }}
|
||||
namespace: {{ include "gha-runner-scale-set.namespace" . }}
|
||||
labels:
|
||||
{{- $base := include "gha-runner-scale-set.labels" . | fromYaml }}
|
||||
{{- $extra := dict "app.kubernetes.io/component" "" }}
|
||||
{{- $reserved := merge $base $extra }}
|
||||
{{- with .Values.labels }}
|
||||
{{- range $k, $v := . }}
|
||||
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
|
||||
{{ $k }}: {{ $v | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $hasCustomResourceMeta }}
|
||||
{{- with .Values.resourceMeta.githubConfigSecret.labels }}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{{- $containerMode := .Values.containerMode }}
|
||||
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeRole) }}
|
||||
{{- if and (or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume")) (not .Values.template.spec.serviceAccountName) }}
|
||||
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
|
||||
# default permission for runner pod service account in kubernetes mode (container hook)
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
@@ -8,15 +8,8 @@ metadata:
|
||||
name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
|
||||
namespace: {{ include "gha-runner-scale-set.namespace" . }}
|
||||
labels:
|
||||
{{- $base := include "gha-runner-scale-set.labels" . | fromYaml }}
|
||||
{{- $extra := dict "app.kubernetes.io/component" "" }}
|
||||
{{- $reserved := merge $base $extra }}
|
||||
{{- with .Values.labels }}
|
||||
{{- range $k, $v := . }}
|
||||
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
|
||||
{{ $k }}: {{ $v | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $hasCustomResourceMeta }}
|
||||
{{- with .Values.resourceMeta.kubernetesModeRole.labels }}
|
||||
@@ -45,11 +38,9 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods/log"]
|
||||
verbs: ["get", "list", "watch",]
|
||||
{{- if ne $containerMode.type "kubernetes-novolume" }}
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs"]
|
||||
verbs: ["get", "list", "create", "delete"]
|
||||
{{- end }}
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list", "create", "delete"]
|
||||
|
||||
@@ -1,21 +1,14 @@
|
||||
{{- $containerMode := .Values.containerMode }}
|
||||
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeRoleBinding) }}
|
||||
{{- if and (or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume")) (not .Values.template.spec.serviceAccountName) }}
|
||||
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
|
||||
namespace: {{ include "gha-runner-scale-set.namespace" . }}
|
||||
labels:
|
||||
{{- $base := include "gha-runner-scale-set.labels" . | fromYaml }}
|
||||
{{- $extra := dict "app.kubernetes.io/component" "" }}
|
||||
{{- $reserved := merge $base $extra }}
|
||||
{{- with .Values.labels }}
|
||||
{{- range $k, $v := . }}
|
||||
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
|
||||
{{ $k }}: {{ $v | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $hasCustomResourceMeta }}
|
||||
{{- with .Values.resourceMeta.kubernetesModeRoleBinding.labels }}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{{- $containerMode := .Values.containerMode }}
|
||||
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeServiceAccount) }}
|
||||
{{- if and (or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume")) (not .Values.template.spec.serviceAccountName) }}
|
||||
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
@@ -18,15 +18,8 @@ metadata:
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- $base := include "gha-runner-scale-set.labels" . | fromYaml }}
|
||||
{{- $extra := dict "app.kubernetes.io/component" "" }}
|
||||
{{- $reserved := merge $base $extra }}
|
||||
{{- with .Values.labels }}
|
||||
{{- range $k, $v := . }}
|
||||
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
|
||||
{{ $k }}: {{ $v | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $hasCustomResourceMeta }}
|
||||
{{- with .Values.resourceMeta.kubernetesModeServiceAccount.labels }}
|
||||
|
||||
@@ -5,15 +5,8 @@ metadata:
|
||||
name: {{ include "gha-runner-scale-set.managerRoleName" . }}
|
||||
namespace: {{ include "gha-runner-scale-set.namespace" . }}
|
||||
labels:
|
||||
{{- $base := include "gha-runner-scale-set.labels" . | fromYaml }}
|
||||
{{- $extra := dict "app.kubernetes.io/component" "manager-role" }}
|
||||
{{- $reserved := merge $base $extra }}
|
||||
{{- with .Values.labels }}
|
||||
{{- range $k, $v := . }}
|
||||
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
|
||||
{{ $k }}: {{ $v | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $hasCustomResourceMeta }}
|
||||
{{- with .Values.resourceMeta.managerRole.labels }}
|
||||
|
||||
@@ -5,15 +5,8 @@ metadata:
|
||||
name: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
|
||||
namespace: {{ include "gha-runner-scale-set.namespace" . }}
|
||||
labels:
|
||||
{{- $base := include "gha-runner-scale-set.labels" . | fromYaml }}
|
||||
{{- $extra := dict "app.kubernetes.io/component" "manager-role-binding" }}
|
||||
{{- $reserved := merge $base $extra }}
|
||||
{{- with .Values.labels }}
|
||||
{{- range $k, $v := . }}
|
||||
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
|
||||
{{ $k }}: {{ $v | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $hasCustomResourceMeta }}
|
||||
{{- with .Values.resourceMeta.managerRoleBinding.labels }}
|
||||
|
||||
@@ -7,15 +7,8 @@ metadata:
|
||||
name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }}
|
||||
namespace: {{ include "gha-runner-scale-set.namespace" . }}
|
||||
labels:
|
||||
{{- $base := include "gha-runner-scale-set.labels" . | fromYaml }}
|
||||
{{- $extra := dict "app.kubernetes.io/component" "" }}
|
||||
{{- $reserved := merge $base $extra }}
|
||||
{{- with .Values.labels }}
|
||||
{{- range $k, $v := . }}
|
||||
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
|
||||
{{ $k }}: {{ $v | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $hasCustomResourceMeta }}
|
||||
{{- with .Values.resourceMeta.noPermissionServiceAccount.labels }}
|
||||
|
||||
@@ -204,6 +204,7 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
|
||||
|
||||
func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
|
||||
require.NoError(t, err)
|
||||
@@ -269,72 +270,6 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
|
||||
assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
|
||||
}
|
||||
|
||||
func TestTemplateRenderedSetServiceAccountToKubeNoVolumeMode(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-runners"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"githubConfigUrl": "https://github.com/actions",
|
||||
"githubConfigSecret.github_token": "gh_token12345",
|
||||
"containerMode.type": "kubernetes-novolume",
|
||||
"controllerServiceAccount.name": "arc",
|
||||
"controllerServiceAccount.namespace": "arc-system",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_serviceaccount.yaml"})
|
||||
var serviceAccount corev1.ServiceAccount
|
||||
helm.UnmarshalK8SYaml(t, output, &serviceAccount)
|
||||
|
||||
assert.Equal(t, namespaceName, serviceAccount.Namespace)
|
||||
assert.Equal(t, "test-runners-gha-rs-kube-mode", serviceAccount.Name)
|
||||
assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])
|
||||
|
||||
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"})
|
||||
var role rbacv1.Role
|
||||
helm.UnmarshalK8SYaml(t, output, &role)
|
||||
|
||||
assert.Equal(t, namespaceName, role.Namespace)
|
||||
assert.Equal(t, "test-runners-gha-rs-kube-mode", role.Name)
|
||||
|
||||
assert.Equal(t, "actions.github.com/cleanup-protection", role.Finalizers[0])
|
||||
|
||||
assert.Len(t, role.Rules, 4, "kube mode role should have 4 rules")
|
||||
assert.Equal(t, "pods", role.Rules[0].Resources[0])
|
||||
assert.Equal(t, "pods/exec", role.Rules[1].Resources[0])
|
||||
assert.Equal(t, "pods/log", role.Rules[2].Resources[0])
|
||||
assert.Equal(t, "secrets", role.Rules[3].Resources[0])
|
||||
|
||||
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role_binding.yaml"})
|
||||
var roleBinding rbacv1.RoleBinding
|
||||
helm.UnmarshalK8SYaml(t, output, &roleBinding)
|
||||
|
||||
assert.Equal(t, namespaceName, roleBinding.Namespace)
|
||||
assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.Name)
|
||||
assert.Len(t, roleBinding.Subjects, 1)
|
||||
assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.Subjects[0].Name)
|
||||
assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace)
|
||||
assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.RoleRef.Name)
|
||||
assert.Equal(t, "Role", roleBinding.RoleRef.Kind)
|
||||
assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])
|
||||
|
||||
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
|
||||
var ars v1alpha1.AutoscalingRunnerSet
|
||||
helm.UnmarshalK8SYaml(t, output, &ars)
|
||||
|
||||
expectedServiceAccountName := "test-runners-gha-rs-kube-mode"
|
||||
assert.Equal(t, expectedServiceAccountName, ars.Spec.Template.Spec.ServiceAccountName)
|
||||
assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
|
||||
}
|
||||
|
||||
func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -793,20 +728,20 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraInitContainers(t *testin
|
||||
var ars v1alpha1.AutoscalingRunnerSet
|
||||
helm.UnmarshalK8SYaml(t, output, &ars)
|
||||
|
||||
assert.Len(t, ars.Spec.Template.Spec.InitContainers, 4, "InitContainers should be 4")
|
||||
assert.Equal(t, "kube-init", ars.Spec.Template.Spec.InitContainers[2].Name, "InitContainers[1] Name should be kube-init")
|
||||
assert.Equal(t, "runner-image:latest", ars.Spec.Template.Spec.InitContainers[2].Image, "InitContainers[1] Image should be runner-image:latest")
|
||||
assert.Equal(t, "sudo", ars.Spec.Template.Spec.InitContainers[2].Command[0], "InitContainers[1] Command[0] should be sudo")
|
||||
assert.Equal(t, "chown", ars.Spec.Template.Spec.InitContainers[2].Command[1], "InitContainers[1] Command[1] should be chown")
|
||||
assert.Equal(t, "-R", ars.Spec.Template.Spec.InitContainers[2].Command[2], "InitContainers[1] Command[2] should be -R")
|
||||
assert.Equal(t, "1001:123", ars.Spec.Template.Spec.InitContainers[2].Command[3], "InitContainers[1] Command[3] should be 1001:123")
|
||||
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[2].Command[4], "InitContainers[1] Command[4] should be /home/runner/_work")
|
||||
assert.Equal(t, "work", ars.Spec.Template.Spec.InitContainers[2].VolumeMounts[0].Name, "InitContainers[1] VolumeMounts[0] Name should be work")
|
||||
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[2].VolumeMounts[0].MountPath, "InitContainers[1] VolumeMounts[0] MountPath should be /home/runner/_work")
|
||||
assert.Len(t, ars.Spec.Template.Spec.InitContainers, 3, "InitContainers should be 3")
|
||||
assert.Equal(t, "kube-init", ars.Spec.Template.Spec.InitContainers[1].Name, "InitContainers[1] Name should be kube-init")
|
||||
assert.Equal(t, "runner-image:latest", ars.Spec.Template.Spec.InitContainers[1].Image, "InitContainers[1] Image should be runner-image:latest")
|
||||
assert.Equal(t, "sudo", ars.Spec.Template.Spec.InitContainers[1].Command[0], "InitContainers[1] Command[0] should be sudo")
|
||||
assert.Equal(t, "chown", ars.Spec.Template.Spec.InitContainers[1].Command[1], "InitContainers[1] Command[1] should be chown")
|
||||
assert.Equal(t, "-R", ars.Spec.Template.Spec.InitContainers[1].Command[2], "InitContainers[1] Command[2] should be -R")
|
||||
assert.Equal(t, "1001:123", ars.Spec.Template.Spec.InitContainers[1].Command[3], "InitContainers[1] Command[3] should be 1001:123")
|
||||
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].Command[4], "InitContainers[1] Command[4] should be /home/runner/_work")
|
||||
assert.Equal(t, "work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].Name, "InitContainers[1] VolumeMounts[0] Name should be work")
|
||||
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].MountPath, "InitContainers[1] VolumeMounts[0] MountPath should be /home/runner/_work")
|
||||
|
||||
assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[3].Name, "InitContainers[2] Name should be ls")
|
||||
assert.Equal(t, "ubuntu:latest", ars.Spec.Template.Spec.InitContainers[3].Image, "InitContainers[2] Image should be ubuntu:latest")
|
||||
assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[3].Command[0], "InitContainers[2] Command[0] should be ls")
|
||||
assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Name, "InitContainers[2] Name should be ls")
|
||||
assert.Equal(t, "ubuntu:latest", ars.Spec.Template.Spec.InitContainers[2].Image, "InitContainers[2] Image should be ubuntu:latest")
|
||||
assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Command[0], "InitContainers[2] Command[0] should be ls")
|
||||
}
|
||||
|
||||
func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) {
|
||||
@@ -925,26 +860,13 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
|
||||
|
||||
assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil")
|
||||
|
||||
assert.Len(t, ars.Spec.Template.Spec.InitContainers, 2, "Template.Spec should have 2 init container")
|
||||
assert.Len(t, ars.Spec.Template.Spec.InitContainers, 1, "Template.Spec should have 1 init container")
|
||||
assert.Equal(t, "init-dind-externals", ars.Spec.Template.Spec.InitContainers[0].Name)
|
||||
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.InitContainers[0].Image)
|
||||
assert.Equal(t, "cp", ars.Spec.Template.Spec.InitContainers[0].Command[0])
|
||||
assert.Equal(t, "-r /home/runner/externals/. /home/runner/tmpDir/", strings.Join(ars.Spec.Template.Spec.InitContainers[0].Args, " "))
|
||||
|
||||
assert.Equal(t, "dind", ars.Spec.Template.Spec.InitContainers[1].Name)
|
||||
assert.Equal(t, "docker:dind", ars.Spec.Template.Spec.InitContainers[1].Image)
|
||||
assert.True(t, *ars.Spec.Template.Spec.InitContainers[1].SecurityContext.Privileged)
|
||||
assert.Len(t, ars.Spec.Template.Spec.InitContainers[1].VolumeMounts, 3, "The dind container should have 3 volume mounts, dind-sock, work and externals")
|
||||
assert.Equal(t, "work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].Name)
|
||||
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].MountPath)
|
||||
|
||||
assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[1].Name)
|
||||
assert.Equal(t, "/var/run", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[1].MountPath)
|
||||
|
||||
assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[2].Name)
|
||||
assert.Equal(t, "/home/runner/externals", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[2].MountPath)
|
||||
|
||||
assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "Template.Spec should have 1 container")
|
||||
assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "Template.Spec should have 2 container")
|
||||
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name)
|
||||
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
|
||||
assert.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 2, "The runner container should have 2 env vars, DOCKER_HOST and RUNNER_WAIT_FOR_DOCKER_IN_SECONDS")
|
||||
@@ -961,6 +883,19 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
|
||||
assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name)
|
||||
assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath)
|
||||
|
||||
assert.Equal(t, "dind", ars.Spec.Template.Spec.Containers[1].Name)
|
||||
assert.Equal(t, "docker:dind", ars.Spec.Template.Spec.Containers[1].Image)
|
||||
assert.True(t, *ars.Spec.Template.Spec.Containers[1].SecurityContext.Privileged)
|
||||
assert.Len(t, ars.Spec.Template.Spec.Containers[1].VolumeMounts, 3, "The dind container should have 3 volume mounts, dind-sock, work and externals")
|
||||
assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].Name)
|
||||
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].MountPath)
|
||||
|
||||
assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].Name)
|
||||
assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].MountPath)
|
||||
|
||||
assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].Name)
|
||||
assert.Equal(t, "/home/runner/externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].MountPath)
|
||||
|
||||
assert.Len(t, ars.Spec.Template.Spec.Volumes, 3, "Volumes should be 3")
|
||||
assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be dind-sock")
|
||||
assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Volumes[1].Name, "Volume name should be dind-externals")
|
||||
@@ -1026,65 +961,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T)
|
||||
assert.NotNil(t, ars.Spec.Template.Spec.Volumes[0].Ephemeral, "Template.Spec should have 1 ephemeral volume")
|
||||
}
|
||||
|
||||
func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesModeNoVolume(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-runners"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"githubConfigUrl": "https://github.com/actions",
|
||||
"githubConfigSecret.github_token": "gh_token12345",
|
||||
"containerMode.type": "kubernetes-novolume",
|
||||
"controllerServiceAccount.name": "arc",
|
||||
"controllerServiceAccount.namespace": "arc-system",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
|
||||
|
||||
var ars v1alpha1.AutoscalingRunnerSet
|
||||
helm.UnmarshalK8SYaml(t, output, &ars)
|
||||
|
||||
assert.Equal(t, namespaceName, ars.Namespace)
|
||||
assert.Equal(t, "test-runners", ars.Name)
|
||||
|
||||
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/name"])
|
||||
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
|
||||
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
|
||||
assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)
|
||||
|
||||
assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")
|
||||
assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil")
|
||||
assert.Nil(t, ars.Spec.MaxRunners, "MaxRunners should be nil")
|
||||
assert.Nil(t, ars.Spec.Proxy, "Proxy should be nil")
|
||||
assert.Nil(t, ars.Spec.GitHubServerTLS, "GitHubServerTLS should be nil")
|
||||
|
||||
assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil")
|
||||
|
||||
assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "Template.Spec should have 1 container")
|
||||
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name)
|
||||
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
|
||||
|
||||
require.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 4, "The runner container should have 4 env vars")
|
||||
assert.Equal(t, "ACTIONS_RUNNER_CONTAINER_HOOKS", ars.Spec.Template.Spec.Containers[0].Env[0].Name)
|
||||
assert.Equal(t, "/home/runner/k8s-novolume/index.js", ars.Spec.Template.Spec.Containers[0].Env[0].Value)
|
||||
assert.Equal(t, "ACTIONS_RUNNER_POD_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name)
|
||||
assert.Equal(t, "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER", ars.Spec.Template.Spec.Containers[0].Env[2].Name)
|
||||
assert.Equal(t, "true", ars.Spec.Template.Spec.Containers[0].Env[2].Value)
|
||||
assert.Equal(t, "ACTIONS_RUNNER_IMAGE", ars.Spec.Template.Spec.Containers[0].Env[3].Name)
|
||||
assert.Equal(t, ars.Spec.Template.Spec.Containers[0].Image, ars.Spec.Template.Spec.Containers[0].Env[3].Value)
|
||||
|
||||
assert.Len(t, ars.Spec.Template.Spec.Volumes, 0, "Template.Spec should have 0 volumes")
|
||||
}
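The assertions in the test above address each environment variable by its index, so any reordering in templates/autoscalingrunnerset.yaml would break them even when the rendered values are still correct. A name-based lookup is one way to keep the same intent without the positional coupling; the helper below is only an illustrative sketch built on the corev1 types these tests already use, not something that exists in the chart's test suite.

// envValue returns the value of the named env var on a rendered container,
// or "" when the variable is absent. Illustrative sketch only.
func envValue(c corev1.Container, name string) string {
    for _, e := range c.Env {
        if e.Name == name {
            return e.Value
        }
    }
    return ""
}

// Example use against the rendered AutoscalingRunnerSet:
//   runner := ars.Spec.Template.Spec.Containers[0]
//   assert.Equal(t, "/home/runner/k8s-novolume/index.js", envValue(runner, "ACTIONS_RUNNER_CONTAINER_HOOKS"))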
|
||||
|
||||
func TestTemplateRenderedAutoscalingRunnerSet_ListenerPodTemplate(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -1264,7 +1140,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Run("providing githubServerTLS.runnerMountPath", func(t *testing.T) {
|
||||
t.Run("mode default", func(t *testing.T) {
|
||||
t.Run("mode: default", func(t *testing.T) {
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
@@ -1282,7 +1158,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
ars := render(t, options)
|
||||
|
||||
require.NotNil(t, ars.Spec.GitHubServerTLS)
|
||||
expected := &v1alpha1.TLSConfig{
|
||||
expected := &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
@@ -1323,7 +1199,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("mode dind", func(t *testing.T) {
|
||||
t.Run("mode: dind", func(t *testing.T) {
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
@@ -1342,7 +1218,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
ars := render(t, options)
|
||||
|
||||
require.NotNil(t, ars.Spec.GitHubServerTLS)
|
||||
expected := &v1alpha1.TLSConfig{
|
||||
expected := &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
@@ -1383,7 +1259,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("mode kubernetes", func(t *testing.T) {
|
||||
t.Run("mode: kubernetes", func(t *testing.T) {
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
@@ -1402,67 +1278,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
ars := render(t, options)
|
||||
|
||||
require.NotNil(t, ars.Spec.GitHubServerTLS)
|
||||
expected := &v1alpha1.TLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
Name: "certs-configmap",
|
||||
},
|
||||
Key: "cert.pem",
|
||||
},
|
||||
},
|
||||
}
|
||||
assert.Equal(t, expected, ars.Spec.GitHubServerTLS)
|
||||
|
||||
var volume *corev1.Volume
|
||||
for _, v := range ars.Spec.Template.Spec.Volumes {
|
||||
if v.Name == "github-server-tls-cert" {
|
||||
volume = &v
|
||||
break
|
||||
}
|
||||
}
|
||||
require.NotNil(t, volume)
|
||||
assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
|
||||
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
|
||||
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)
|
||||
|
||||
assert.Contains(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
|
||||
Name: "github-server-tls-cert",
|
||||
MountPath: "/runner/mount/path/cert.pem",
|
||||
SubPath: "cert.pem",
|
||||
})
|
||||
|
||||
assert.Contains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{
|
||||
Name: "NODE_EXTRA_CA_CERTS",
|
||||
Value: "/runner/mount/path/cert.pem",
|
||||
})
|
||||
|
||||
assert.Contains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{
|
||||
Name: "RUNNER_UPDATE_CA_CERTS",
|
||||
Value: "1",
|
||||
})
|
||||
})
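Each of the TLS mode subtests repeats the inline loop above to locate the github-server-tls-cert volume before checking its ConfigMap items. A small lookup helper would factor that out; the sketch below is illustrative only (it is not part of this test file) and uses the corev1 types the file already imports.

// findVolume returns a copy of the named volume from the rendered pod spec,
// or nil when no volume with that name exists. Illustrative sketch only.
func findVolume(spec corev1.PodSpec, name string) *corev1.Volume {
    for i := range spec.Volumes {
        if spec.Volumes[i].Name == name {
            v := spec.Volumes[i]
            return &v
        }
    }
    return nil
}

// The "mode kubernetes" block above would then reduce to:
//   volume := findVolume(ars.Spec.Template.Spec, "github-server-tls-cert")
//   require.NotNil(t, volume)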
|
||||
|
||||
t.Run("mode kubernetes-novolume", func(t *testing.T) {
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"githubConfigUrl": "https://github.com/actions",
|
||||
"githubConfigSecret": "pre-defined-secrets",
|
||||
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
|
||||
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
|
||||
"githubServerTLS.runnerMountPath": "/runner/mount/path",
|
||||
"containerMode.type": "kubernetes-novolume",
|
||||
"controllerServiceAccount.name": "arc",
|
||||
"controllerServiceAccount.namespace": "arc-system",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
ars := render(t, options)
|
||||
|
||||
require.NotNil(t, ars.Spec.GitHubServerTLS)
|
||||
expected := &v1alpha1.TLSConfig{
|
||||
expected := &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
@@ -1505,7 +1321,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("without providing githubServerTLS.runnerMountPath", func(t *testing.T) {
|
||||
t.Run("mode default", func(t *testing.T) {
|
||||
t.Run("mode: default", func(t *testing.T) {
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
@@ -1522,7 +1338,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
ars := render(t, options)
|
||||
|
||||
require.NotNil(t, ars.Spec.GitHubServerTLS)
|
||||
expected := &v1alpha1.TLSConfig{
|
||||
expected := &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
@@ -1560,7 +1376,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("mode dind", func(t *testing.T) {
|
||||
t.Run("mode: dind", func(t *testing.T) {
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
@@ -1578,7 +1394,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
ars := render(t, options)
|
||||
|
||||
require.NotNil(t, ars.Spec.GitHubServerTLS)
|
||||
expected := &v1alpha1.TLSConfig{
|
||||
expected := &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
@@ -1616,7 +1432,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("mode kubernetes", func(t *testing.T) {
|
||||
t.Run("mode: kubernetes", func(t *testing.T) {
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
@@ -1634,63 +1450,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
ars := render(t, options)
|
||||
|
||||
require.NotNil(t, ars.Spec.GitHubServerTLS)
|
||||
expected := &v1alpha1.TLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
Name: "certs-configmap",
|
||||
},
|
||||
Key: "cert.pem",
|
||||
},
|
||||
},
|
||||
}
|
||||
assert.Equal(t, expected, ars.Spec.GitHubServerTLS)
|
||||
|
||||
var volume *corev1.Volume
|
||||
for _, v := range ars.Spec.Template.Spec.Volumes {
|
||||
if v.Name == "github-server-tls-cert" {
|
||||
volume = &v
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.Nil(t, volume)
|
||||
|
||||
assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
|
||||
Name: "github-server-tls-cert",
|
||||
MountPath: "/runner/mount/path/cert.pem",
|
||||
SubPath: "cert.pem",
|
||||
})
|
||||
|
||||
assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{
|
||||
Name: "NODE_EXTRA_CA_CERTS",
|
||||
Value: "/runner/mount/path/cert.pem",
|
||||
})
|
||||
|
||||
assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{
|
||||
Name: "RUNNER_UPDATE_CA_CERTS",
|
||||
Value: "1",
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("mode kubernetes-novolume", func(t *testing.T) {
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"githubConfigUrl": "https://github.com/actions",
|
||||
"githubConfigSecret": "pre-defined-secrets",
|
||||
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
|
||||
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
|
||||
"containerMode.type": "kubernetes-novolume",
|
||||
"controllerServiceAccount.name": "arc",
|
||||
"controllerServiceAccount.namespace": "arc-system",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
ars := render(t, options)
|
||||
|
||||
require.NotNil(t, ars.Spec.GitHubServerTLS)
|
||||
expected := &v1alpha1.TLSConfig{
|
||||
expected := &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
@@ -2066,7 +1826,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinDMergePodSpec(t *testing.T) {
|
||||
var ars v1alpha1.AutoscalingRunnerSet
|
||||
helm.UnmarshalK8SYaml(t, output, &ars)
|
||||
|
||||
assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "There should be 1 containers")
|
||||
assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "There should be 2 containers")
|
||||
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner")
|
||||
assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set")
|
||||
assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set")
|
||||
@@ -2191,8 +1951,6 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing.
|
||||
func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, mode := range []string{"kubernetes", "kubernetes-novolume"} {
|
||||
t.Run("containerMode "+mode, func(t *testing.T) {
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
|
||||
require.NoError(t, err)
|
||||
@@ -2207,7 +1965,7 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t
|
||||
"githubConfigSecret.github_token": "gh_token12345",
|
||||
"controllerServiceAccount.name": "arc",
|
||||
"controllerServiceAccount.namespace": "arc-system",
|
||||
"containerMode.type": mode,
|
||||
"containerMode.type": "kubernetes",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
@@ -2228,8 +1986,6 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t
|
||||
for annotation, value := range annotationValues {
|
||||
assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunnerContainerEnvNotEmptyMap(t *testing.T) {
|
||||
@@ -2660,21 +2416,6 @@ func TestNamespaceOverride(t *testing.T) {
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
|
||||
},
|
||||
},
|
||||
"kube_novolume_mode_role": {
|
||||
file: "kube_mode_role.yaml",
|
||||
options: &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"namespaceOverride": namespaceOverride,
|
||||
"containerMode.type": "kubernetes-novolume",
|
||||
"controllerServiceAccount.name": "foo",
|
||||
"controllerServiceAccount.namespace": "bar",
|
||||
"githubConfigSecret.github_token": "gh_token12345",
|
||||
"githubConfigUrl": "https://github.com",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
|
||||
},
|
||||
},
|
||||
"kube_mode_role_binding": {
|
||||
file: "kube_mode_role_binding.yaml",
|
||||
options: &helm.Options{
|
||||
@@ -2690,21 +2431,6 @@ func TestNamespaceOverride(t *testing.T) {
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
|
||||
},
|
||||
},
|
||||
"kube_novolume_mode_role_binding": {
|
||||
file: "kube_mode_role_binding.yaml",
|
||||
options: &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"namespaceOverride": namespaceOverride,
|
||||
"containerMode.type": "kubernetes-novolume",
|
||||
"controllerServiceAccount.name": "foo",
|
||||
"controllerServiceAccount.namespace": "bar",
|
||||
"githubConfigSecret.github_token": "gh_token12345",
|
||||
"githubConfigUrl": "https://github.com",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
|
||||
},
|
||||
},
|
||||
"kube_mode_serviceaccount": {
|
||||
file: "kube_mode_serviceaccount.yaml",
|
||||
options: &helm.Options{
|
||||
@@ -2720,21 +2446,6 @@ func TestNamespaceOverride(t *testing.T) {
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
|
||||
},
|
||||
},
|
||||
"kube_novolume_mode_serviceaccount": {
|
||||
file: "kube_mode_serviceaccount.yaml",
|
||||
options: &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"namespaceOverride": namespaceOverride,
|
||||
"containerMode.type": "kubernetes-novolume",
|
||||
"controllerServiceAccount.name": "foo",
|
||||
"controllerServiceAccount.namespace": "bar",
|
||||
"githubConfigSecret.github_token": "gh_token12345",
|
||||
"githubConfigUrl": "https://github.com",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range tt {
|
||||
@@ -2757,43 +2468,3 @@ func TestNamespaceOverride(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAutoscalingRunnerSetCustomAnnotationsAndLabelsApplied(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-runners"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"githubConfigUrl": "https://github.com/actions",
|
||||
"githubConfigSecret.github_token": "gh_token12345",
|
||||
"controllerServiceAccount.name": "arc",
|
||||
"controllerServiceAccount.namespace": "arc-system",
|
||||
"annotations.actions\\.github\\.com/vault": "azure_key_vault",
|
||||
"annotations.actions\\.github\\.com/cleanup-manager-role-name": "not-propagated",
|
||||
"labels.custom": "custom",
|
||||
"labels.app\\.kubernetes\\.io/component": "not-propagated",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
|
||||
|
||||
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
|
||||
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
|
||||
|
||||
vault := autoscalingRunnerSet.Annotations["actions.github.com/vault"]
|
||||
assert.Equal(t, "azure_key_vault", vault)
|
||||
|
||||
custom := autoscalingRunnerSet.Labels["custom"]
|
||||
assert.Equal(t, "custom", custom)
|
||||
|
||||
assert.NotEqual(t, "not-propagated", autoscalingRunnerSet.Annotations["actions.github.com/cleanup-manager-role-name"])
|
||||
assert.NotEqual(t, "not-propagated", autoscalingRunnerSet.Labels["app.kubernetes.io/component"])
|
||||
}
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
## githubConfigUrl is the GitHub url for where you want to configure runners
|
||||
## ex: https://github.com/myorg/myrepo or https://github.com/myorg or https://github.com/enterprises/myenterprise
|
||||
## ex: https://github.com/myorg/myrepo or https://github.com/myorg
|
||||
githubConfigUrl: ""
|
||||
|
||||
## githubConfigSecret is the k8s secret information to use when authenticating via the GitHub API.
|
||||
## You can choose to supply:
|
||||
## A) a PAT token,
|
||||
## B) a GitHub App, or
|
||||
## C) a pre-defined secret.
|
||||
## C) a pre-defined Kubernetes secret.
|
||||
## The syntax for each of these variations is documented below.
|
||||
## (Variation A) When using a PAT token, the syntax is as follows:
|
||||
githubConfigSecret:
|
||||
@@ -17,7 +17,6 @@ githubConfigSecret:
|
||||
## (Variation B) When using a GitHub App, the syntax is as follows:
|
||||
# githubConfigSecret:
|
||||
# # NOTE: IDs MUST be strings, use quotes
|
||||
# # The github_app_id can be an app_id or the client_id
|
||||
# github_app_id: ""
|
||||
# github_app_installation_id: ""
|
||||
# github_app_private_key: |
|
||||
@@ -28,11 +27,8 @@ githubConfigSecret:
|
||||
# .
|
||||
# private key line N
|
||||
#
|
||||
## (Variation C) When using a pre-defined secret.
|
||||
## The secret can be pulled either directly from Kubernetes, or from the vault, depending on configuration.
|
||||
## Kubernetes secret in the same namespace that the gha-runner-scale-set is going to deploy.
|
||||
## On the other hand, if the vault is configured, secret name will be used to fetch the app configuration.
|
||||
## The syntax is as follows:
|
||||
## (Variation C) When using a pre-defined Kubernetes secret in the same namespace that the gha-runner-scale-set is going to deploy,
|
||||
## the syntax is as follows:
|
||||
# githubConfigSecret: pre-defined-secret
|
||||
## Notes on using pre-defined Kubernetes secrets:
|
||||
## You need to make sure your predefined secret has all the required secret data set properly.
|
||||
@@ -88,26 +84,6 @@ githubConfigSecret:
|
||||
# key: ca.crt
|
||||
# runnerMountPath: /usr/local/share/ca-certificates/
|
||||
|
||||
# keyVault:
|
||||
# Available values: "azure_key_vault"
|
||||
# type: ""
|
||||
# Configuration related to azure key vault
|
||||
# azure_key_vault:
|
||||
# url: ""
|
||||
# client_id: ""
|
||||
# tenant_id: ""
|
||||
# certificate_path: ""
|
||||
# proxy:
|
||||
# http:
|
||||
# url: http://proxy.com:1234
|
||||
# credentialSecretRef: proxy-auth # a secret with `username` and `password` keys
|
||||
# https:
|
||||
# url: http://proxy.com:1234
|
||||
# credentialSecretRef: proxy-auth # a secret with `username` and `password` keys
|
||||
# noProxy:
|
||||
# - example.com
|
||||
# - example.org
|
||||
|
||||
## Container mode is an object that provides out-of-box configuration
|
||||
## for dind and kubernetes mode. Template will be modified as documented under the
|
||||
## template object.
|
||||
@@ -115,7 +91,7 @@ githubConfigSecret:
|
||||
## If any customization is required for dind or kubernetes mode, containerMode should remain
|
||||
## empty, and configuration should be applied to the template.
|
||||
# containerMode:
|
||||
# type: "dind" ## type can be set to "dind", "kubernetes", or "kubernetes-novolume"
|
||||
# type: "dind" ## type can be set to dind or kubernetes
|
||||
# ## the following is required when containerMode.type=kubernetes
|
||||
# kubernetesModeWorkVolumeClaim:
|
||||
# accessModes: ["ReadWriteOnce"]
|
||||
@@ -154,7 +130,7 @@ githubConfigSecret:
|
||||
# counters:
|
||||
# gha_started_jobs_total:
|
||||
# labels:
|
||||
# ["repository", "organization", "enterprise", "job_name", "event_name", "job_workflow_ref", "job_workflow_name", "job_workflow_target"]
|
||||
# ["repository", "organization", "enterprise", "job_name", "event_name"]
|
||||
# gha_completed_jobs_total:
|
||||
# labels:
|
||||
# [
|
||||
@@ -164,9 +140,6 @@ githubConfigSecret:
|
||||
# "job_name",
|
||||
# "event_name",
|
||||
# "job_result",
|
||||
# "job_workflow_ref",
|
||||
# "job_workflow_name",
|
||||
# "job_workflow_target",
|
||||
# ]
|
||||
# gauges:
|
||||
# gha_assigned_jobs:
|
||||
@@ -188,7 +161,7 @@ githubConfigSecret:
|
||||
# histograms:
|
||||
# gha_job_startup_duration_seconds:
|
||||
# labels:
|
||||
# ["repository", "organization", "enterprise", "job_name", "event_name","job_workflow_ref", "job_workflow_name", "job_workflow_target"]
|
||||
# ["repository", "organization", "enterprise", "job_name", "event_name"]
|
||||
# buckets:
|
||||
# [
|
||||
# 0.01,
|
||||
@@ -246,9 +219,6 @@ githubConfigSecret:
|
||||
# "job_name",
|
||||
# "event_name",
|
||||
# "job_result",
|
||||
# "job_workflow_ref",
|
||||
# "job_workflow_name",
|
||||
# "job_workflow_target"
|
||||
# ]
|
||||
# buckets:
|
||||
# [
|
||||
@@ -313,6 +283,18 @@ template:
|
||||
## volumeMounts:
|
||||
## - name: dind-externals
|
||||
## mountPath: /home/runner/tmpDir
|
||||
## containers:
|
||||
## - name: runner
|
||||
## image: ghcr.io/actions/actions-runner:latest
|
||||
## command: ["/home/runner/run.sh"]
|
||||
## env:
|
||||
## - name: DOCKER_HOST
|
||||
## value: unix:///var/run/docker.sock
|
||||
## volumeMounts:
|
||||
## - name: work
|
||||
## mountPath: /home/runner/_work
|
||||
## - name: dind-sock
|
||||
## mountPath: /var/run
|
||||
## - name: dind
|
||||
## image: docker:dind
|
||||
## args:
|
||||
@@ -324,15 +306,6 @@ template:
|
||||
## value: "123"
|
||||
## securityContext:
|
||||
## privileged: true
|
||||
## restartPolicy: Always
|
||||
## startupProbe:
|
||||
## exec:
|
||||
## command:
|
||||
## - docker
|
||||
## - info
|
||||
## initialDelaySeconds: 0
|
||||
## failureThreshold: 24
|
||||
## periodSeconds: 5
|
||||
## volumeMounts:
|
||||
## - name: work
|
||||
## mountPath: /home/runner/_work
|
||||
@@ -340,20 +313,6 @@ template:
|
||||
## mountPath: /var/run
|
||||
## - name: dind-externals
|
||||
## mountPath: /home/runner/externals
|
||||
## containers:
|
||||
## - name: runner
|
||||
## image: ghcr.io/actions/actions-runner:latest
|
||||
## command: ["/home/runner/run.sh"]
|
||||
## env:
|
||||
## - name: DOCKER_HOST
|
||||
## value: unix:///var/run/docker.sock
|
||||
## - name: RUNNER_WAIT_FOR_DOCKER_IN_SECONDS
|
||||
## value: "120"
|
||||
## volumeMounts:
|
||||
## - name: work
|
||||
## mountPath: /home/runner/_work
|
||||
## - name: dind-sock
|
||||
## mountPath: /var/run
|
||||
## volumes:
|
||||
## - name: work
|
||||
## emptyDir: {}
|
||||
@@ -391,25 +350,6 @@ template:
|
||||
## resources:
|
||||
## requests:
|
||||
## storage: 1Gi
|
||||
######################################################################################################
|
||||
## with containerMode.type=kubernetes-novolume, we will populate the template.spec with the following pod spec
|
||||
## template:
|
||||
## spec:
|
||||
## containers:
|
||||
## - name: runner
|
||||
## image: ghcr.io/actions/actions-runner:latest
|
||||
## command: ["/home/runner/run.sh"]
|
||||
## env:
|
||||
## - name: ACTIONS_RUNNER_CONTAINER_HOOKS
|
||||
## value: /home/runner/k8s-novolume/index.js
|
||||
## - name: ACTIONS_RUNNER_POD_NAME
|
||||
## valueFrom:
|
||||
## fieldRef:
|
||||
## fieldPath: metadata.name
|
||||
## - name: ACTIONS_RUNNER_IMAGE
|
||||
## value: ghcr.io/actions/actions-runner:latest # should match the runner image
|
||||
## - name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER
|
||||
## value: "true"
|
||||
spec:
|
||||
containers:
|
||||
- name: runner
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
// App is responsible for initializing required components and running the app.
|
||||
type App struct {
|
||||
// configured fields
|
||||
config *config.Config
|
||||
config config.Config
|
||||
logger logr.Logger
|
||||
|
||||
// initialized fields
|
||||
@@ -38,12 +38,8 @@ type Worker interface {
|
||||
}
|
||||
|
||||
func New(config config.Config) (*App, error) {
|
||||
if err := config.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("failed to validate config: %w", err)
|
||||
}
|
||||
|
||||
app := &App{
|
||||
config: &config,
|
||||
config: config,
|
||||
}
|
||||
|
||||
ghConfig, err := actions.ParseGitHubConfigFromURL(config.ConfigureUrl)
|
||||
@@ -73,8 +69,8 @@ func New(config config.Config) (*App, error) {
|
||||
Repository: ghConfig.Repository,
|
||||
ServerAddr: config.MetricsAddr,
|
||||
ServerEndpoint: config.MetricsEndpoint,
|
||||
Metrics: config.Metrics,
|
||||
Logger: app.logger.WithName("metrics exporter"),
|
||||
Metrics: *config.Metrics,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
@@ -10,26 +9,19 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
|
||||
"github.com/actions/actions-runner-controller/build"
|
||||
"github.com/actions/actions-runner-controller/github/actions"
|
||||
"github.com/actions/actions-runner-controller/logging"
|
||||
"github.com/actions/actions-runner-controller/vault"
|
||||
"github.com/actions/actions-runner-controller/vault/azurekeyvault"
|
||||
"github.com/go-logr/logr"
|
||||
"golang.org/x/net/http/httpproxy"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
ConfigureUrl string `json:"configure_url"`
|
||||
VaultType vault.VaultType `json:"vault_type"`
|
||||
VaultLookupKey string `json:"vault_lookup_key"`
|
||||
// If the VaultType is set to "azure_key_vault", this field must be populated.
|
||||
AzureKeyVaultConfig *azurekeyvault.Config `json:"azure_key_vault,omitempty"`
|
||||
// AppConfig contains the GitHub App configuration.
|
||||
// It is initially set to nil if VaultType is set.
|
||||
// Otherwise, it is populated with the GitHub App credentials from the GitHub secret.
|
||||
*appconfig.AppConfig
|
||||
AppID int64 `json:"app_id"`
|
||||
AppInstallationID int64 `json:"app_installation_id"`
|
||||
AppPrivateKey string `json:"app_private_key"`
|
||||
Token string `json:"token"`
|
||||
EphemeralRunnerSetNamespace string `json:"ephemeral_runner_set_namespace"`
|
||||
EphemeralRunnerSetName string `json:"ephemeral_runner_set_name"`
|
||||
MaxRunners int `json:"max_runners"`
|
||||
@@ -44,58 +36,23 @@ type Config struct {
|
||||
Metrics *v1alpha1.MetricsConfig `json:"metrics"`
|
||||
}
|
||||
|
||||
func Read(ctx context.Context, configPath string) (*Config, error) {
|
||||
f, err := os.Open(configPath)
|
||||
func Read(path string) (Config, error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return Config{}, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var config Config
|
||||
if err := json.NewDecoder(f).Decode(&config); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode config: %w", err)
|
||||
return Config{}, fmt.Errorf("failed to decode config: %w", err)
|
||||
}
|
||||
|
||||
var vault vault.Vault
|
||||
switch config.VaultType {
|
||||
case "":
|
||||
if err := config.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("failed to validate configuration: %v", err)
|
||||
}
|
||||
|
||||
return &config, nil
|
||||
case "azure_key_vault":
|
||||
akv, err := azurekeyvault.New(*config.AzureKeyVaultConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Azure Key Vault client: %w", err)
|
||||
}
|
||||
|
||||
vault = akv
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported vault type: %s", config.VaultType)
|
||||
}
|
||||
|
||||
appConfigRaw, err := vault.GetSecret(ctx, config.VaultLookupKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get app config from vault: %w", err)
|
||||
}
|
||||
|
||||
appConfig, err := appconfig.FromJSONString(appConfigRaw)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read app config from string: %v", err)
|
||||
}
|
||||
|
||||
config.AppConfig = appConfig
|
||||
|
||||
if err := config.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("config validation failed: %w", err)
|
||||
return Config{}, fmt.Errorf("failed to validate config: %w", err)
|
||||
}
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
return &config, nil
|
||||
return config, nil
|
||||
}
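The call shape is the only thing a caller changes between these two versions of Read: the newer, vault-aware variant takes a context so the Azure Key Vault lookup can be cancelled and returns *Config, while the older one simply decodes and validates the file and returns a Config value. A caller-side sketch mirroring the listener's main() further down in this diff (error handling elided; the environment variable name is the one that file uses):

// Newer branch: the context bounds the optional Key Vault round trip.
cfg, err := config.Read(ctx, os.Getenv("LISTENER_CONFIG_PATH")) // cfg is a *Config

// Older branch: plain decode-and-validate, no context parameter.
cfg, err := config.Read(os.Getenv("LISTENER_CONFIG_PATH")) // cfg is a Config value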
|
||||
|
||||
// Validate checks the configuration for errors.
|
||||
@@ -105,30 +62,26 @@ func (c *Config) Validate() error {
|
||||
}
|
||||
|
||||
if len(c.EphemeralRunnerSetNamespace) == 0 || len(c.EphemeralRunnerSetName) == 0 {
|
||||
return fmt.Errorf("EphemeralRunnerSetNamespace %q or EphemeralRunnerSetName %q is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
|
||||
return fmt.Errorf("EphemeralRunnerSetNamespace '%s' or EphemeralRunnerSetName '%s' is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
|
||||
}
|
||||
|
||||
if c.RunnerScaleSetId == 0 {
|
||||
return fmt.Errorf(`RunnerScaleSetId "%d" is missing`, c.RunnerScaleSetId)
|
||||
return fmt.Errorf("RunnerScaleSetId '%d' is missing", c.RunnerScaleSetId)
|
||||
}
|
||||
|
||||
if c.MaxRunners < c.MinRunners {
|
||||
return fmt.Errorf(`MinRunners "%d" cannot be greater than MaxRunners "%d"`, c.MinRunners, c.MaxRunners)
|
||||
return fmt.Errorf("MinRunners '%d' cannot be greater than MaxRunners '%d'", c.MinRunners, c.MaxRunners)
|
||||
}
|
||||
|
||||
if c.VaultType != "" {
|
||||
if err := c.VaultType.Validate(); err != nil {
|
||||
return fmt.Errorf("VaultType validation failed: %w", err)
|
||||
}
|
||||
if c.VaultLookupKey == "" {
|
||||
return fmt.Errorf("VaultLookupKey is required when VaultType is set to %q", c.VaultType)
|
||||
}
|
||||
hasToken := len(c.Token) > 0
|
||||
hasPrivateKeyConfig := c.AppID > 0 && c.AppPrivateKey != ""
|
||||
|
||||
if !hasToken && !hasPrivateKeyConfig {
|
||||
return fmt.Errorf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
|
||||
}
|
||||
|
||||
if c.VaultType == "" && c.VaultLookupKey == "" {
|
||||
if err := c.AppConfig.Validate(); err != nil {
|
||||
return fmt.Errorf("AppConfig validation failed: %w", err)
|
||||
}
|
||||
if hasToken && hasPrivateKeyConfig {
|
||||
return fmt.Errorf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
|
||||
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
|
||||
"github.com/actions/actions-runner-controller/github/actions"
|
||||
"github.com/actions/actions-runner-controller/github/actions/testserver"
|
||||
@@ -54,9 +53,7 @@ func TestCustomerServerRootCA(t *testing.T) {
|
||||
config := config.Config{
|
||||
ConfigureUrl: server.ConfigURLForOrg("myorg"),
|
||||
ServerRootCA: certsString,
|
||||
AppConfig: &appconfig.AppConfig{
|
||||
Token: "token",
|
||||
},
|
||||
}
|
||||
|
||||
client, err := config.ActionsClient(logr.Discard())
|
||||
@@ -83,9 +80,7 @@ func TestProxySettings(t *testing.T) {
|
||||
|
||||
config := config.Config{
|
||||
ConfigureUrl: "https://github.com/org/repo",
|
||||
AppConfig: &appconfig.AppConfig{
|
||||
Token: "token",
|
||||
},
|
||||
}
|
||||
|
||||
client, err := config.ActionsClient(logr.Discard())
|
||||
@@ -115,9 +110,7 @@ func TestProxySettings(t *testing.T) {
|
||||
|
||||
config := config.Config{
|
||||
ConfigureUrl: "https://github.com/org/repo",
|
||||
AppConfig: &appconfig.AppConfig{
|
||||
Token: "token",
|
||||
},
|
||||
}
|
||||
|
||||
client, err := config.ActionsClient(logr.Discard(), actions.WithRetryMax(0))
|
||||
@@ -152,9 +145,7 @@ func TestProxySettings(t *testing.T) {
|
||||
|
||||
config := config.Config{
|
||||
ConfigureUrl: "https://github.com/org/repo",
|
||||
AppConfig: &appconfig.AppConfig{
|
||||
Token: "token",
|
||||
},
|
||||
}
|
||||
|
||||
client, err := config.ActionsClient(logr.Discard())
|
||||
|
||||
92
cmd/ghalistener/config/config_test.go
Normal file
@@ -0,0 +1,92 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestConfigValidationMinMax(t *testing.T) {
|
||||
config := &Config{
|
||||
ConfigureUrl: "github.com/some_org/some_repo",
|
||||
EphemeralRunnerSetNamespace: "namespace",
|
||||
EphemeralRunnerSetName: "deployment",
|
||||
RunnerScaleSetId: 1,
|
||||
MinRunners: 5,
|
||||
MaxRunners: 2,
|
||||
Token: "token",
|
||||
}
|
||||
err := config.Validate()
|
||||
assert.ErrorContains(t, err, "MinRunners '5' cannot be greater than MaxRunners '2", "Expected error about MinRunners > MaxRunners")
|
||||
}
|
||||
|
||||
func TestConfigValidationMissingToken(t *testing.T) {
|
||||
config := &Config{
|
||||
ConfigureUrl: "github.com/some_org/some_repo",
|
||||
EphemeralRunnerSetNamespace: "namespace",
|
||||
EphemeralRunnerSetName: "deployment",
|
||||
RunnerScaleSetId: 1,
|
||||
}
|
||||
err := config.Validate()
|
||||
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
|
||||
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
|
||||
}
|
||||
|
||||
func TestConfigValidationAppKey(t *testing.T) {
|
||||
config := &Config{
|
||||
AppID: 1,
|
||||
AppInstallationID: 10,
|
||||
ConfigureUrl: "github.com/some_org/some_repo",
|
||||
EphemeralRunnerSetNamespace: "namespace",
|
||||
EphemeralRunnerSetName: "deployment",
|
||||
RunnerScaleSetId: 1,
|
||||
}
|
||||
err := config.Validate()
|
||||
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
|
||||
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
|
||||
}
|
||||
|
||||
func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
|
||||
config := &Config{
|
||||
AppID: 1,
|
||||
AppInstallationID: 10,
|
||||
AppPrivateKey: "asdf",
|
||||
Token: "asdf",
|
||||
ConfigureUrl: "github.com/some_org/some_repo",
|
||||
EphemeralRunnerSetNamespace: "namespace",
|
||||
EphemeralRunnerSetName: "deployment",
|
||||
RunnerScaleSetId: 1,
|
||||
}
|
||||
err := config.Validate()
|
||||
expectedError := fmt.Sprintf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
|
||||
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
|
||||
}
|
||||
|
||||
func TestConfigValidation(t *testing.T) {
|
||||
config := &Config{
|
||||
ConfigureUrl: "https://github.com/actions",
|
||||
EphemeralRunnerSetNamespace: "namespace",
|
||||
EphemeralRunnerSetName: "deployment",
|
||||
RunnerScaleSetId: 1,
|
||||
MinRunners: 1,
|
||||
MaxRunners: 5,
|
||||
Token: "asdf",
|
||||
}
|
||||
|
||||
err := config.Validate()
|
||||
|
||||
assert.NoError(t, err, "Expected no error")
|
||||
}
|
||||
|
||||
func TestConfigValidationConfigUrl(t *testing.T) {
|
||||
config := &Config{
|
||||
EphemeralRunnerSetNamespace: "namespace",
|
||||
EphemeralRunnerSetName: "deployment",
|
||||
RunnerScaleSetId: 1,
|
||||
}
|
||||
|
||||
err := config.Validate()
|
||||
|
||||
assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
|
||||
}
|
||||
@@ -1,170 +0,0 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
|
||||
"github.com/actions/actions-runner-controller/vault"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestConfigValidationMinMax(t *testing.T) {
|
||||
config := &Config{
|
||||
ConfigureUrl: "github.com/some_org/some_repo",
|
||||
EphemeralRunnerSetNamespace: "namespace",
|
||||
EphemeralRunnerSetName: "deployment",
|
||||
RunnerScaleSetId: 1,
|
||||
MinRunners: 5,
|
||||
MaxRunners: 2,
|
||||
AppConfig: &appconfig.AppConfig{
|
||||
Token: "token",
|
||||
},
|
||||
}
|
||||
err := config.Validate()
|
||||
assert.ErrorContains(t, err, `MinRunners "5" cannot be greater than MaxRunners "2"`, "Expected error about MinRunners > MaxRunners")
|
||||
}
|
||||
|
||||
func TestConfigValidationMissingToken(t *testing.T) {
|
||||
config := &Config{
|
||||
ConfigureUrl: "github.com/some_org/some_repo",
|
||||
EphemeralRunnerSetNamespace: "namespace",
|
||||
EphemeralRunnerSetName: "deployment",
|
||||
RunnerScaleSetId: 1,
|
||||
}
|
||||
err := config.Validate()
|
||||
expectedError := "AppConfig validation failed: missing app config"
|
||||
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
|
||||
}
|
||||
|
||||
func TestConfigValidationAppKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("app id integer", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
config := &Config{
|
||||
AppConfig: &appconfig.AppConfig{
|
||||
AppID: "1",
|
||||
AppInstallationID: 10,
|
||||
},
|
||||
ConfigureUrl: "github.com/some_org/some_repo",
|
||||
EphemeralRunnerSetNamespace: "namespace",
|
||||
EphemeralRunnerSetName: "deployment",
|
||||
RunnerScaleSetId: 1,
|
||||
}
|
||||
err := config.Validate()
|
||||
expectedError := "AppConfig validation failed: no credentials provided: either a PAT or GitHub App credentials should be provided"
|
||||
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
|
||||
})
|
||||
|
||||
t.Run("app id as client id", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
config := &Config{
|
||||
AppConfig: &appconfig.AppConfig{
|
||||
AppID: "Iv23f8doAlphaNumer1c",
|
||||
AppInstallationID: 10,
|
||||
},
|
||||
ConfigureUrl: "github.com/some_org/some_repo",
|
||||
EphemeralRunnerSetNamespace: "namespace",
|
||||
EphemeralRunnerSetName: "deployment",
|
||||
RunnerScaleSetId: 1,
|
||||
}
|
||||
err := config.Validate()
|
||||
expectedError := "AppConfig validation failed: no credentials provided: either a PAT or GitHub App credentials should be provided"
|
||||
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
|
||||
config := &Config{
|
||||
AppConfig: &appconfig.AppConfig{
|
||||
AppID: "1",
|
||||
AppInstallationID: 10,
|
||||
AppPrivateKey: "asdf",
|
||||
Token: "asdf",
|
||||
},
|
||||
ConfigureUrl: "github.com/some_org/some_repo",
|
||||
EphemeralRunnerSetNamespace: "namespace",
|
||||
EphemeralRunnerSetName: "deployment",
|
||||
RunnerScaleSetId: 1,
|
||||
}
|
||||
err := config.Validate()
|
||||
expectedError := "AppConfig validation failed: both PAT and GitHub App credentials provided. should only provide one"
|
||||
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
|
||||
}
|
||||
|
||||
func TestConfigValidation(t *testing.T) {
|
||||
config := &Config{
|
||||
ConfigureUrl: "https://github.com/actions",
|
||||
EphemeralRunnerSetNamespace: "namespace",
|
||||
EphemeralRunnerSetName: "deployment",
|
||||
RunnerScaleSetId: 1,
|
||||
MinRunners: 1,
|
||||
MaxRunners: 5,
|
||||
AppConfig: &appconfig.AppConfig{
|
||||
Token: "asdf",
|
||||
},
|
||||
}
|
||||
|
||||
err := config.Validate()
|
||||
|
||||
assert.NoError(t, err, "Expected no error")
|
||||
}
|
||||
|
||||
func TestConfigValidationConfigUrl(t *testing.T) {
|
||||
config := &Config{
|
||||
EphemeralRunnerSetNamespace: "namespace",
|
||||
EphemeralRunnerSetName: "deployment",
|
||||
RunnerScaleSetId: 1,
|
||||
}
|
||||
|
||||
err := config.Validate()
|
||||
|
||||
assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
|
||||
}
|
||||
|
||||
func TestConfigValidationWithVaultConfig(t *testing.T) {
|
||||
t.Run("valid", func(t *testing.T) {
|
||||
config := &Config{
|
||||
ConfigureUrl: "https://github.com/actions",
|
||||
EphemeralRunnerSetNamespace: "namespace",
|
||||
EphemeralRunnerSetName: "deployment",
|
||||
RunnerScaleSetId: 1,
|
||||
MinRunners: 1,
|
||||
MaxRunners: 5,
|
||||
VaultType: vault.VaultTypeAzureKeyVault,
|
||||
VaultLookupKey: "testkey",
|
||||
}
|
||||
err := config.Validate()
|
||||
assert.NoError(t, err, "Expected no error for valid vault type")
|
||||
})
|
||||
|
||||
t.Run("invalid vault type", func(t *testing.T) {
|
||||
config := &Config{
|
||||
ConfigureUrl: "https://github.com/actions",
|
||||
EphemeralRunnerSetNamespace: "namespace",
|
||||
EphemeralRunnerSetName: "deployment",
|
||||
RunnerScaleSetId: 1,
|
||||
MinRunners: 1,
|
||||
MaxRunners: 5,
|
||||
VaultType: vault.VaultType("invalid_vault_type"),
|
||||
VaultLookupKey: "testkey",
|
||||
}
|
||||
err := config.Validate()
|
||||
assert.ErrorContains(t, err, `unknown vault type: "invalid_vault_type"`, "Expected error for invalid vault type")
|
||||
})
|
||||
|
||||
t.Run("vault type set without lookup key", func(t *testing.T) {
|
||||
config := &Config{
|
||||
ConfigureUrl: "https://github.com/actions",
|
||||
EphemeralRunnerSetNamespace: "namespace",
|
||||
EphemeralRunnerSetName: "deployment",
|
||||
RunnerScaleSetId: 1,
|
||||
MinRunners: 1,
|
||||
MaxRunners: 5,
|
||||
VaultType: vault.VaultTypeAzureKeyVault,
|
||||
VaultLookupKey: "",
|
||||
}
|
||||
err := config.Validate()
|
||||
assert.ErrorContains(t, err, `VaultLookupKey is required when VaultType is set to "azure_key_vault"`, "Expected error for vault type without lookup key")
|
||||
})
|
||||
}
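One easy-to-miss difference between the test file removed here and its replacement added earlier in this diff is the quoting convention pinned in the expected error strings: the removed tests expect the double-quoted wording produced by Validate's backquoted format strings, while the replacement expects the single-quoted wording. Side by side, using the same example values the tests use:

fmt.Errorf(`MinRunners "%d" cannot be greater than MaxRunners "%d"`, 5, 2) // wording expected by the removed tests
fmt.Errorf("MinRunners '%d' cannot be greater than MaxRunners '%d'", 5, 2) // wording expected by the replacement tests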
|
||||
@@ -361,7 +361,7 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
|
||||
return nil, fmt.Errorf("failed to decode job available: %w", err)
|
||||
}
|
||||
|
||||
l.logger.Info("Job available message received", "jobId", jobAvailable.JobID)
|
||||
l.logger.Info("Job available message received", "jobId", jobAvailable.RunnerRequestId)
|
||||
parsedMsg.jobsAvailable = append(parsedMsg.jobsAvailable, &jobAvailable)
|
||||
|
||||
case messageTypeJobAssigned:
|
||||
@@ -370,14 +370,14 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
|
||||
return nil, fmt.Errorf("failed to decode job assigned: %w", err)
|
||||
}
|
||||
|
||||
l.logger.Info("Job assigned message received", "jobId", jobAssigned.JobID)
|
||||
l.logger.Info("Job assigned message received", "jobId", jobAssigned.RunnerRequestId)
|
||||
|
||||
case messageTypeJobStarted:
|
||||
var jobStarted actions.JobStarted
|
||||
if err := json.Unmarshal(msg, &jobStarted); err != nil {
|
||||
return nil, fmt.Errorf("could not decode job started message. %w", err)
|
||||
}
|
||||
l.logger.Info("Job started message received.", "JobID", jobStarted.JobID, "RunnerId", jobStarted.RunnerID)
|
||||
l.logger.Info("Job started message received.", "RequestId", jobStarted.RunnerRequestId, "RunnerId", jobStarted.RunnerId)
|
||||
parsedMsg.jobsStarted = append(parsedMsg.jobsStarted, &jobStarted)
|
||||
|
||||
case messageTypeJobCompleted:
|
||||
@@ -386,13 +386,7 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
|
||||
return nil, fmt.Errorf("failed to decode job completed: %w", err)
|
||||
}
|
||||
|
||||
l.logger.Info(
|
||||
"Job completed message received.",
|
||||
"JobID", jobCompleted.JobID,
|
||||
"Result", jobCompleted.Result,
|
||||
"RunnerId", jobCompleted.RunnerId,
|
||||
"RunnerName", jobCompleted.RunnerName,
|
||||
)
|
||||
l.logger.Info("Job completed message received.", "RequestId", jobCompleted.RunnerRequestId, "Result", jobCompleted.Result, "RunnerId", jobCompleted.RunnerId, "RunnerName", jobCompleted.RunnerName)
|
||||
parsedMsg.jobsCompleted = append(parsedMsg.jobsCompleted, &jobCompleted)
|
||||
|
||||
default:
|
||||
@@ -406,7 +400,7 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
|
||||
func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
|
||||
ids := make([]int64, 0, len(jobsAvailable))
|
||||
for _, job := range jobsAvailable {
|
||||
ids = append(ids, job.RunnerRequestID)
|
||||
ids = append(ids, job.RunnerRequestId)
|
||||
}
|
||||
|
||||
l.logger.Info("Acquiring jobs", "count", len(ids), "requestIds", fmt.Sprint(ids))
|
||||
|
||||
@@ -627,17 +627,17 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
|
||||
availableJobs := []*actions.JobAvailable{
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestID: 1,
|
||||
RunnerRequestId: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestID: 2,
|
||||
RunnerRequestId: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestID: 3,
|
||||
RunnerRequestId: 3,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -678,17 +678,17 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
|
||||
availableJobs := []*actions.JobAvailable{
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestID: 1,
|
||||
RunnerRequestId: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestID: 2,
|
||||
RunnerRequestId: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestID: 3,
|
||||
RunnerRequestId: 3,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -724,17 +724,17 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
|
||||
availableJobs := []*actions.JobAvailable{
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestID: 1,
|
||||
RunnerRequestId: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestID: 2,
|
||||
RunnerRequestId: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestID: 3,
|
||||
RunnerRequestId: 3,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -809,17 +809,17 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
|
||||
availableJobs := []*actions.JobAvailable{
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestID: 1,
|
||||
RunnerRequestId: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestID: 2,
|
||||
RunnerRequestId: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestID: 3,
|
||||
RunnerRequestId: 3,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -881,7 +881,7 @@ func TestListener_parseMessage(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobAvailable,
|
||||
},
|
||||
RunnerRequestID: 1,
|
||||
RunnerRequestId: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -890,7 +890,7 @@ func TestListener_parseMessage(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobAvailable,
|
||||
},
|
||||
RunnerRequestID: 2,
|
||||
RunnerRequestId: 2,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -904,7 +904,7 @@ func TestListener_parseMessage(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobAssigned,
|
||||
},
|
||||
RunnerRequestID: 3,
|
||||
RunnerRequestId: 3,
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -912,7 +912,7 @@ func TestListener_parseMessage(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobAssigned,
|
||||
},
|
||||
RunnerRequestID: 4,
|
||||
RunnerRequestId: 4,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -926,9 +926,9 @@ func TestListener_parseMessage(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobStarted,
|
||||
},
|
||||
RunnerRequestID: 5,
|
||||
RunnerRequestId: 5,
|
||||
},
|
||||
RunnerID: 2,
|
||||
RunnerId: 2,
|
||||
RunnerName: "runner2",
|
||||
},
|
||||
}
|
||||
@@ -942,7 +942,7 @@ func TestListener_parseMessage(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobCompleted,
|
||||
},
|
||||
RunnerRequestID: 6,
|
||||
RunnerRequestId: 6,
|
||||
},
|
||||
Result: "success",
|
||||
RunnerId: 1,
|
||||
|
||||
@@ -123,9 +123,9 @@ func TestHandleMessageMetrics(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobStarted,
|
||||
},
|
||||
RunnerRequestID: 8,
|
||||
RunnerRequestId: 8,
|
||||
},
|
||||
RunnerID: 3,
|
||||
RunnerId: 3,
|
||||
RunnerName: "runner3",
|
||||
},
|
||||
}
|
||||
@@ -139,7 +139,7 @@ func TestHandleMessageMetrics(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobCompleted,
|
||||
},
|
||||
RunnerRequestID: 6,
|
||||
RunnerRequestId: 6,
|
||||
},
|
||||
Result: "success",
|
||||
RunnerId: 1,
|
||||
@@ -150,7 +150,7 @@ func TestHandleMessageMetrics(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobCompleted,
|
||||
},
|
||||
RunnerRequestID: 7,
|
||||
RunnerRequestId: 7,
|
||||
},
|
||||
Result: "success",
|
||||
RunnerId: 2,
|
||||
|
||||
@@ -13,27 +13,26 @@ import (
)

func main() {
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer stop()

configPath, ok := os.LookupEnv("LISTENER_CONFIG_PATH")
if !ok {
fmt.Fprintf(os.Stderr, "Error: LISTENER_CONFIG_PATH environment variable is not set\n")
os.Exit(1)
}

config, err := config.Read(ctx, configPath)
config, err := config.Read(configPath)
if err != nil {
log.Printf("Failed to read config: %v", err)
os.Exit(1)
}

app, err := app.New(*config)
app, err := app.New(config)
if err != nil {
log.Printf("Failed to initialize app: %v", err)
os.Exit(1)
}

ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer stop()

if err := app.Run(ctx); err != nil {
log.Printf("Application returned an error: %v", err)
os.Exit(1)

@@ -21,9 +21,6 @@ const (
|
||||
labelKeyOrganization = "organization"
|
||||
labelKeyRepository = "repository"
|
||||
labelKeyJobName = "job_name"
|
||||
labelKeyJobWorkflowRef = "job_workflow_ref"
|
||||
labelKeyJobWorkflowName = "job_workflow_name"
|
||||
labelKeyJobWorkflowTarget = "job_workflow_target"
|
||||
labelKeyEventName = "event_name"
|
||||
labelKeyJobResult = "job_result"
|
||||
)
|
||||
@@ -77,15 +74,11 @@ var metricsHelp = metricsHelpRegistry{
}

func (e *exporter) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
workflowRefInfo := ParseWorkflowRef(jobBase.JobWorkflowRef)
return prometheus.Labels{
labelKeyEnterprise: e.scaleSetLabels[labelKeyEnterprise],
labelKeyOrganization: jobBase.OwnerName,
labelKeyRepository: jobBase.RepositoryName,
labelKeyJobName: jobBase.JobDisplayName,
labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
labelKeyJobWorkflowName: workflowRefInfo.Name,
labelKeyJobWorkflowTarget: workflowRefInfo.Target,
labelKeyEventName: jobBase.EventName,
}
}
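Concretely, the extra labels the newer jobLabels derives come from splitting the workflow ref into a name and a target. The values below are taken from the workflow-ref parsing tests removed later in this diff, so only the variable name in this sketch is invented:

info := ParseWorkflowRef("actions/runner/.github/workflows/build.yml@refs/heads/main")
// info.Name   == "build"       -> job_workflow_name
// info.Target == "heads/main"  -> job_workflow_target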
|
||||
@@ -159,148 +152,13 @@ type ExporterConfig struct {
|
||||
ServerAddr string
|
||||
ServerEndpoint string
|
||||
Logger logr.Logger
|
||||
Metrics *v1alpha1.MetricsConfig
|
||||
}
|
||||
|
||||
var defaultMetrics = v1alpha1.MetricsConfig{
|
||||
Counters: map[string]*v1alpha1.CounterMetric{
|
||||
MetricStartedJobsTotal: {
|
||||
Labels: []string{
|
||||
labelKeyEnterprise,
|
||||
labelKeyOrganization,
|
||||
labelKeyRepository,
|
||||
labelKeyJobName,
|
||||
labelKeyEventName,
|
||||
},
|
||||
},
|
||||
MetricCompletedJobsTotal: {
|
||||
Labels: []string{
|
||||
labelKeyEnterprise,
|
||||
labelKeyOrganization,
|
||||
labelKeyRepository,
|
||||
labelKeyJobName,
|
||||
labelKeyEventName,
|
||||
labelKeyJobResult,
|
||||
},
|
||||
},
|
||||
},
|
||||
Gauges: map[string]*v1alpha1.GaugeMetric{
|
||||
MetricAssignedJobs: {
|
||||
Labels: []string{
|
||||
labelKeyEnterprise,
|
||||
labelKeyOrganization,
|
||||
labelKeyRepository,
|
||||
labelKeyRunnerScaleSetName,
|
||||
labelKeyRunnerScaleSetNamespace,
|
||||
},
|
||||
},
|
||||
MetricRunningJobs: {
|
||||
Labels: []string{
|
||||
labelKeyEnterprise,
|
||||
labelKeyOrganization,
|
||||
labelKeyRepository,
|
||||
labelKeyRunnerScaleSetName,
|
||||
labelKeyRunnerScaleSetNamespace,
|
||||
},
|
||||
},
|
||||
MetricRegisteredRunners: {
|
||||
Labels: []string{
|
||||
labelKeyEnterprise,
|
||||
labelKeyOrganization,
|
||||
labelKeyRepository,
|
||||
labelKeyRunnerScaleSetName,
|
||||
labelKeyRunnerScaleSetNamespace,
|
||||
},
|
||||
},
|
||||
MetricBusyRunners: {
|
||||
Labels: []string{
|
||||
labelKeyEnterprise,
|
||||
labelKeyOrganization,
|
||||
labelKeyRepository,
|
||||
labelKeyRunnerScaleSetName,
|
||||
labelKeyRunnerScaleSetNamespace,
|
||||
},
|
||||
},
|
||||
MetricMinRunners: {
|
||||
Labels: []string{
|
||||
labelKeyEnterprise,
|
||||
labelKeyOrganization,
|
||||
labelKeyRepository,
|
||||
labelKeyRunnerScaleSetName,
|
||||
labelKeyRunnerScaleSetNamespace,
|
||||
},
|
||||
},
|
||||
MetricMaxRunners: {
|
||||
Labels: []string{
|
||||
labelKeyEnterprise,
|
||||
labelKeyOrganization,
|
||||
labelKeyRepository,
|
||||
labelKeyRunnerScaleSetName,
|
||||
labelKeyRunnerScaleSetNamespace,
|
||||
},
|
||||
},
|
||||
MetricDesiredRunners: {
|
||||
Labels: []string{
|
||||
labelKeyEnterprise,
|
||||
labelKeyOrganization,
|
||||
labelKeyRepository,
|
||||
labelKeyRunnerScaleSetName,
|
||||
labelKeyRunnerScaleSetNamespace,
|
||||
},
|
||||
},
|
||||
MetricIdleRunners: {
|
||||
Labels: []string{
|
||||
labelKeyEnterprise,
|
||||
labelKeyOrganization,
|
||||
labelKeyRepository,
|
||||
labelKeyRunnerScaleSetName,
|
||||
labelKeyRunnerScaleSetNamespace,
|
||||
},
|
||||
},
|
||||
},
|
||||
Histograms: map[string]*v1alpha1.HistogramMetric{
|
||||
MetricJobStartupDurationSeconds: {
|
||||
Labels: []string{
|
||||
labelKeyEnterprise,
|
||||
labelKeyOrganization,
|
||||
labelKeyRepository,
|
||||
labelKeyJobName,
|
||||
labelKeyEventName,
|
||||
},
|
||||
Buckets: defaultRuntimeBuckets,
|
||||
},
|
||||
MetricJobExecutionDurationSeconds: {
|
||||
Labels: []string{
|
||||
labelKeyEnterprise,
|
||||
labelKeyOrganization,
|
||||
labelKeyRepository,
|
||||
labelKeyJobName,
|
||||
labelKeyEventName,
|
||||
labelKeyJobResult,
|
||||
},
|
||||
Buckets: defaultRuntimeBuckets,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func (e *ExporterConfig) defaults() {
|
||||
if e.ServerAddr == "" {
|
||||
e.ServerAddr = ":8080"
|
||||
}
|
||||
if e.ServerEndpoint == "" {
|
||||
e.ServerEndpoint = "/metrics"
|
||||
}
|
||||
if e.Metrics == nil {
|
||||
defaultMetrics := defaultMetrics
|
||||
e.Metrics = &defaultMetrics
|
||||
}
|
||||
Metrics v1alpha1.MetricsConfig
|
||||
}
|
||||
|
||||
func NewExporter(config ExporterConfig) ServerExporter {
|
||||
config.defaults()
|
||||
reg := prometheus.NewRegistry()
|
||||
|
||||
metrics := installMetrics(*config.Metrics, reg, config.Logger)
|
||||
metrics := installMetrics(config.Metrics, reg, config.Logger)
|
||||
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle(
|
||||
@@ -473,7 +331,7 @@ func (e *exporter) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
e.setGauge(MetricAssignedJobs, e.scaleSetLabels, float64(stats.TotalAssignedJobs))
e.setGauge(MetricRunningJobs, e.scaleSetLabels, float64(stats.TotalRunningJobs))
e.setGauge(MetricRegisteredRunners, e.scaleSetLabels, float64(stats.TotalRegisteredRunners))
e.setGauge(MetricBusyRunners, e.scaleSetLabels, float64(stats.TotalBusyRunners))
e.setGauge(MetricBusyRunners, e.scaleSetLabels, float64(float64(stats.TotalBusyRunners)))
e.setGauge(MetricIdleRunners, e.scaleSetLabels, float64(stats.TotalIdleRunners))
}
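For the newer branch, the net effect of the ExporterConfig hunks above is that defaults() lets a caller construct an exporter from a nearly empty config: ServerAddr falls back to ":8080", ServerEndpoint to "/metrics", and a nil Metrics pointer to the built-in defaultMetrics. A minimal sketch under that assumption, using only field names visible in this diff:

// Hypothetical minimal construction against the newer branch's ExporterConfig.
exp := NewExporter(ExporterConfig{
    ScaleSetName: "test-scale-set",
    Logger:       logr.Discard(),
})
_ = exp // serves the default metric set on :8080/metrics once the exporter is started (assumed)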
|
||||
|
||||
|
||||
@@ -1,99 +0,0 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/actions/actions-runner-controller/github/actions"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestMetricsWithWorkflowRefParsing(t *testing.T) {
|
||||
// Create a test exporter
|
||||
exporter := &exporter{
|
||||
scaleSetLabels: prometheus.Labels{
|
||||
labelKeyEnterprise: "test-enterprise",
|
||||
labelKeyOrganization: "test-org",
|
||||
labelKeyRepository: "test-repo",
|
||||
labelKeyRunnerScaleSetName: "test-scale-set",
|
||||
labelKeyRunnerScaleSetNamespace: "test-namespace",
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
jobBase actions.JobMessageBase
|
||||
wantName string
|
||||
wantTarget string
|
||||
}{
|
||||
{
|
||||
name: "main branch workflow",
|
||||
jobBase: actions.JobMessageBase{
|
||||
OwnerName: "actions",
|
||||
RepositoryName: "runner",
|
||||
JobDisplayName: "Build and Test",
|
||||
JobWorkflowRef: "actions/runner/.github/workflows/build.yml@refs/heads/main",
|
||||
EventName: "push",
|
||||
},
|
||||
wantName: "build",
|
||||
wantTarget: "heads/main",
|
||||
},
|
||||
{
|
||||
name: "feature branch workflow",
|
||||
jobBase: actions.JobMessageBase{
|
||||
OwnerName: "myorg",
|
||||
RepositoryName: "myrepo",
|
||||
JobDisplayName: "CI/CD Pipeline",
|
||||
JobWorkflowRef: "myorg/myrepo/.github/workflows/ci-cd-pipeline.yml@refs/heads/feature/new-metrics",
|
||||
EventName: "push",
|
||||
},
|
||||
wantName: "ci-cd-pipeline",
|
||||
wantTarget: "heads/feature/new-metrics",
|
||||
},
|
||||
{
|
||||
name: "pull request workflow",
|
||||
jobBase: actions.JobMessageBase{
|
||||
OwnerName: "actions",
|
||||
RepositoryName: "runner",
|
||||
JobDisplayName: "PR Checks",
|
||||
JobWorkflowRef: "actions/runner/.github/workflows/pr-checks.yml@refs/pull/123/merge",
|
||||
EventName: "pull_request",
|
||||
},
|
||||
wantName: "pr-checks",
|
||||
wantTarget: "pull/123",
|
||||
},
|
||||
{
|
||||
name: "tag workflow",
|
||||
jobBase: actions.JobMessageBase{
|
||||
OwnerName: "actions",
|
||||
RepositoryName: "runner",
|
||||
JobDisplayName: "Release",
|
||||
JobWorkflowRef: "actions/runner/.github/workflows/release.yml@refs/tags/v1.2.3",
|
||||
EventName: "release",
|
||||
},
|
||||
wantName: "release",
|
||||
wantTarget: "tags/v1.2.3",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
labels := exporter.jobLabels(&tt.jobBase)
|
||||
|
||||
// Build expected labels
|
||||
expectedLabels := prometheus.Labels{
|
||||
labelKeyEnterprise: "test-enterprise",
|
||||
labelKeyOrganization: tt.jobBase.OwnerName,
|
||||
labelKeyRepository: tt.jobBase.RepositoryName,
|
||||
labelKeyJobName: tt.jobBase.JobDisplayName,
|
||||
labelKeyJobWorkflowRef: tt.jobBase.JobWorkflowRef,
|
||||
labelKeyJobWorkflowName: tt.wantName,
|
||||
labelKeyJobWorkflowTarget: tt.wantTarget,
|
||||
labelKeyEventName: tt.jobBase.EventName,
|
||||
}
|
||||
|
||||
// Assert all expected labels match
|
||||
assert.Equal(t, expectedLabels, labels, "jobLabels() returned unexpected labels for %s", tt.name)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestInstallMetrics(t *testing.T) {
|
||||
@@ -87,179 +86,3 @@ func TestInstallMetrics(t *testing.T) {
|
||||
assert.Equal(t, duration.config.Labels, metricsConfig.Histograms[MetricJobStartupDurationSeconds].Labels)
|
||||
assert.Equal(t, duration.config.Buckets, defaultRuntimeBuckets)
|
||||
}
|
||||
|
||||
func TestNewExporter(t *testing.T) {
|
||||
t.Run("with defaults metrics applied", func(t *testing.T) {
|
||||
config := ExporterConfig{
|
||||
ScaleSetName: "test-scale-set",
|
||||
ScaleSetNamespace: "test-namespace",
|
||||
Enterprise: "",
|
||||
Organization: "org",
|
||||
Repository: "repo",
|
||||
ServerAddr: ":6060",
|
||||
ServerEndpoint: "/metrics",
|
||||
Logger: logr.Discard(),
|
||||
Metrics: nil, // when metrics is nil, all default metrics should be registered
|
||||
}
|
||||
|
||||
exporter, ok := NewExporter(config).(*exporter)
|
||||
require.True(t, ok, "expected exporter to be of type *exporter")
|
||||
require.NotNil(t, exporter)
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
wantMetrics := installMetrics(defaultMetrics, reg, config.Logger)
|
||||
|
||||
assert.Equal(t, len(wantMetrics.counters), len(exporter.counters))
|
||||
for k, v := range wantMetrics.counters {
|
||||
assert.Contains(t, exporter.counters, k)
|
||||
assert.Equal(t, v.config, exporter.counters[k].config)
|
||||
}
|
||||
|
||||
assert.Equal(t, len(wantMetrics.gauges), len(exporter.gauges))
|
||||
for k, v := range wantMetrics.gauges {
|
||||
assert.Contains(t, exporter.gauges, k)
|
||||
assert.Equal(t, v.config, exporter.gauges[k].config)
|
||||
}
|
||||
|
||||
assert.Equal(t, len(wantMetrics.histograms), len(exporter.histograms))
|
||||
for k, v := range wantMetrics.histograms {
|
||||
assert.Contains(t, exporter.histograms, k)
|
||||
assert.Equal(t, v.config, exporter.histograms[k].config)
|
||||
}
|
||||
|
||||
require.NotNil(t, exporter.srv)
|
||||
assert.Equal(t, config.ServerAddr, exporter.srv.Addr)
|
||||
})
|
||||
|
||||
t.Run("with default server URL", func(t *testing.T) {
|
||||
config := ExporterConfig{
|
||||
ScaleSetName: "test-scale-set",
|
||||
ScaleSetNamespace: "test-namespace",
|
||||
Enterprise: "",
|
||||
Organization: "org",
|
||||
Repository: "repo",
|
||||
ServerAddr: "", // empty ServerAddr should default to ":8080"
|
||||
ServerEndpoint: "",
|
||||
Logger: logr.Discard(),
|
||||
Metrics: nil, // when metrics is nil, all default metrics should be registered
|
||||
}
|
||||
|
||||
exporter, ok := NewExporter(config).(*exporter)
|
||||
require.True(t, ok, "expected exporter to be of type *exporter")
|
||||
require.NotNil(t, exporter)
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
wantMetrics := installMetrics(defaultMetrics, reg, config.Logger)
|
||||
|
||||
assert.Equal(t, len(wantMetrics.counters), len(exporter.counters))
|
||||
for k, v := range wantMetrics.counters {
|
||||
assert.Contains(t, exporter.counters, k)
|
||||
assert.Equal(t, v.config, exporter.counters[k].config)
|
||||
}
|
||||
|
||||
assert.Equal(t, len(wantMetrics.gauges), len(exporter.gauges))
|
||||
for k, v := range wantMetrics.gauges {
|
||||
assert.Contains(t, exporter.gauges, k)
|
||||
assert.Equal(t, v.config, exporter.gauges[k].config)
|
||||
}
|
||||
|
||||
assert.Equal(t, len(wantMetrics.histograms), len(exporter.histograms))
|
||||
for k, v := range wantMetrics.histograms {
|
||||
assert.Contains(t, exporter.histograms, k)
|
||||
assert.Equal(t, v.config, exporter.histograms[k].config)
|
||||
}
|
||||
|
||||
require.NotNil(t, exporter.srv)
|
||||
assert.Equal(t, exporter.srv.Addr, ":8080")
|
||||
})
|
||||
|
||||
t.Run("with metrics configured", func(t *testing.T) {
|
||||
metricsConfig := v1alpha1.MetricsConfig{
|
||||
Counters: map[string]*v1alpha1.CounterMetric{
|
||||
MetricStartedJobsTotal: {
|
||||
Labels: []string{labelKeyRepository},
|
||||
},
|
||||
},
|
||||
Gauges: map[string]*v1alpha1.GaugeMetric{
|
||||
MetricAssignedJobs: {
|
||||
Labels: []string{labelKeyRepository},
|
||||
},
|
||||
},
|
||||
Histograms: map[string]*v1alpha1.HistogramMetric{
|
||||
MetricJobExecutionDurationSeconds: {
|
||||
Labels: []string{labelKeyRepository},
|
||||
Buckets: []float64{0.1, 1},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
config := ExporterConfig{
|
||||
ScaleSetName: "test-scale-set",
|
||||
ScaleSetNamespace: "test-namespace",
|
||||
Enterprise: "",
|
||||
Organization: "org",
|
||||
Repository: "repo",
|
||||
ServerAddr: ":6060",
|
||||
ServerEndpoint: "/metrics",
|
||||
Logger: logr.Discard(),
|
||||
Metrics: &metricsConfig,
|
||||
}
|
||||
|
||||
exporter, ok := NewExporter(config).(*exporter)
|
||||
require.True(t, ok, "expected exporter to be of type *exporter")
|
||||
require.NotNil(t, exporter)
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
wantMetrics := installMetrics(metricsConfig, reg, config.Logger)
|
||||
|
||||
assert.Equal(t, len(wantMetrics.counters), len(exporter.counters))
|
||||
for k, v := range wantMetrics.counters {
|
||||
assert.Contains(t, exporter.counters, k)
|
||||
assert.Equal(t, v.config, exporter.counters[k].config)
|
||||
}
|
||||
|
||||
assert.Equal(t, len(wantMetrics.gauges), len(exporter.gauges))
|
||||
for k, v := range wantMetrics.gauges {
|
||||
assert.Contains(t, exporter.gauges, k)
|
||||
assert.Equal(t, v.config, exporter.gauges[k].config)
|
||||
}
|
||||
|
||||
assert.Equal(t, len(wantMetrics.histograms), len(exporter.histograms))
|
||||
for k, v := range wantMetrics.histograms {
|
||||
assert.Contains(t, exporter.histograms, k)
|
||||
assert.Equal(t, v.config, exporter.histograms[k].config)
|
||||
}
|
||||
|
||||
require.NotNil(t, exporter.srv)
|
||||
assert.Equal(t, config.ServerAddr, exporter.srv.Addr)
|
||||
})
|
||||
}
|
||||
|
||||
func TestExporterConfigDefaults(t *testing.T) {
|
||||
config := ExporterConfig{
|
||||
ScaleSetName: "test-scale-set",
|
||||
ScaleSetNamespace: "test-namespace",
|
||||
Enterprise: "",
|
||||
Organization: "org",
|
||||
Repository: "repo",
|
||||
ServerAddr: "",
|
||||
ServerEndpoint: "",
|
||||
Logger: logr.Discard(),
|
||||
Metrics: nil, // when metrics is nil, all default metrics should be registered
|
||||
}
|
||||
|
||||
config.defaults()
|
||||
want := ExporterConfig{
|
||||
ScaleSetName: "test-scale-set",
|
||||
ScaleSetNamespace: "test-namespace",
|
||||
Enterprise: "",
|
||||
Organization: "org",
|
||||
Repository: "repo",
|
||||
ServerAddr: ":8080", // default server address
|
||||
ServerEndpoint: "/metrics", // default server endpoint
|
||||
Logger: logr.Discard(),
|
||||
Metrics: &defaultMetrics, // when metrics is nil, all default metrics should be registered
|
||||
}
|
||||
|
||||
assert.Equal(t, want, config)
|
||||
}
|
||||
|
||||
@@ -1,78 +0,0 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// WorkflowRefInfo contains parsed information from a job_workflow_ref
|
||||
type WorkflowRefInfo struct {
|
||||
// Name is the workflow file name without extension
|
||||
Name string
|
||||
// Target is the target ref with type prefix retained for clarity
|
||||
// Examples:
|
||||
// - heads/main (branch)
|
||||
// - heads/feature/new-feature (branch)
|
||||
// - tags/v1.2.3 (tag)
|
||||
// - pull/123 (pull request)
|
||||
Target string
|
||||
}
|
||||
|
||||
// ParseWorkflowRef parses a job_workflow_ref string to extract workflow name and target
|
||||
// Format: {owner}/{repo}/.github/workflows/{workflow_file}@{ref}
|
||||
// Example: mygithuborg/myrepo/.github/workflows/blank.yml@refs/heads/main
|
||||
//
|
||||
// The target field preserves type prefixes to differentiate between:
|
||||
// - Branch references: "heads/{branch}" (from refs/heads/{branch})
|
||||
// - Tag references: "tags/{tag}" (from refs/tags/{tag})
|
||||
// - Pull requests: "pull/{number}" (from refs/pull/{number}/merge)
|
||||
func ParseWorkflowRef(workflowRef string) WorkflowRefInfo {
|
||||
info := WorkflowRefInfo{}
|
||||
|
||||
if workflowRef == "" {
|
||||
return info
|
||||
}
|
||||
|
||||
// Split by @ to separate path and ref
|
||||
parts := strings.Split(workflowRef, "@")
|
||||
if len(parts) != 2 {
|
||||
return info
|
||||
}
|
||||
|
||||
workflowPath := parts[0]
|
||||
ref := parts[1]
|
||||
|
||||
// Extract workflow name from path
|
||||
// The path format is: {owner}/{repo}/.github/workflows/{workflow_file}
|
||||
workflowFile := path.Base(workflowPath)
|
||||
// Remove .yml or .yaml extension
|
||||
info.Name = strings.TrimSuffix(strings.TrimSuffix(workflowFile, ".yml"), ".yaml")
|
||||
|
||||
// Extract target from ref based on type
|
||||
// Branch refs: refs/heads/{branch}
|
||||
// Tag refs: refs/tags/{tag}
|
||||
// PR refs: refs/pull/{number}/merge
|
||||
const (
|
||||
branchPrefix = "refs/heads/"
|
||||
tagPrefix = "refs/tags/"
|
||||
prPrefix = "refs/pull/"
|
||||
)
|
||||
|
||||
switch {
|
||||
case strings.HasPrefix(ref, branchPrefix):
|
||||
// Keep "heads/" prefix to indicate branch
|
||||
info.Target = "heads/" + strings.TrimPrefix(ref, branchPrefix)
|
||||
case strings.HasPrefix(ref, tagPrefix):
|
||||
// Keep "tags/" prefix to indicate tag
|
||||
info.Target = "tags/" + strings.TrimPrefix(ref, tagPrefix)
|
||||
case strings.HasPrefix(ref, prPrefix):
|
||||
// Extract PR number from refs/pull/{number}/merge
|
||||
// Keep "pull/" prefix to indicate pull request
|
||||
prPart := strings.TrimPrefix(ref, prPrefix)
|
||||
if idx := strings.Index(prPart, "/"); idx > 0 {
|
||||
info.Target = "pull/" + prPart[:idx]
|
||||
}
|
||||
}
|
||||
|
||||
return info
|
||||
}
|
||||
@@ -1,82 +0,0 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParseWorkflowRef(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
workflowRef string
|
||||
wantName string
|
||||
wantTarget string
|
||||
}{
|
||||
{
|
||||
name: "standard branch reference with yml",
|
||||
workflowRef: "actions-runner-controller-sandbox/mumoshu-orgrunner-test-01/.github/workflows/blank.yml@refs/heads/main",
|
||||
wantName: "blank",
|
||||
wantTarget: "heads/main",
|
||||
},
|
||||
{
|
||||
name: "branch with special characters",
|
||||
workflowRef: "owner/repo/.github/workflows/ci-cd.yml@refs/heads/feature/new-feature",
|
||||
wantName: "ci-cd",
|
||||
wantTarget: "heads/feature/new-feature",
|
||||
},
|
||||
{
|
||||
name: "yaml extension",
|
||||
workflowRef: "owner/repo/.github/workflows/deploy.yaml@refs/heads/develop",
|
||||
wantName: "deploy",
|
||||
wantTarget: "heads/develop",
|
||||
},
|
||||
{
|
||||
name: "tag reference",
|
||||
workflowRef: "owner/repo/.github/workflows/release.yml@refs/tags/v1.0.0",
|
||||
wantName: "release",
|
||||
wantTarget: "tags/v1.0.0",
|
||||
},
|
||||
{
|
||||
name: "pull request reference",
|
||||
workflowRef: "owner/repo/.github/workflows/test.yml@refs/pull/123/merge",
|
||||
wantName: "test",
|
||||
wantTarget: "pull/123",
|
||||
},
|
||||
{
|
||||
name: "empty workflow ref",
|
||||
workflowRef: "",
|
||||
wantName: "",
|
||||
wantTarget: "",
|
||||
},
|
||||
{
|
||||
name: "invalid format - no @ separator",
|
||||
workflowRef: "owner/repo/.github/workflows/test.yml",
|
||||
wantName: "",
|
||||
wantTarget: "",
|
||||
},
|
||||
{
|
||||
name: "workflow with dots in name",
|
||||
workflowRef: "owner/repo/.github/workflows/build.test.yml@refs/heads/main",
|
||||
wantName: "build.test",
|
||||
wantTarget: "heads/main",
|
||||
},
|
||||
{
|
||||
name: "workflow with hyphen and underscore",
|
||||
workflowRef: "owner/repo/.github/workflows/build-test_deploy.yml@refs/heads/main",
|
||||
wantName: "build-test_deploy",
|
||||
wantTarget: "heads/main",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := ParseWorkflowRef(tt.workflowRef)
|
||||
expected := WorkflowRefInfo{
|
||||
Name: tt.wantName,
|
||||
Target: tt.wantTarget,
|
||||
}
|
||||
assert.Equal(t, expected, got, "ParseWorkflowRef(%q) returned unexpected result", tt.workflowRef)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -100,11 +100,10 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
|
||||
"runnerName", jobInfo.RunnerName,
|
||||
"ownerName", jobInfo.OwnerName,
|
||||
"repoName", jobInfo.RepositoryName,
|
||||
"jobId", jobInfo.JobID,
|
||||
"workflowRef", jobInfo.JobWorkflowRef,
|
||||
"workflowRunId", jobInfo.WorkflowRunID,
|
||||
"workflowRunId", jobInfo.WorkflowRunId,
|
||||
"jobDisplayName", jobInfo.JobDisplayName,
|
||||
"requestId", jobInfo.RunnerRequestID)
|
||||
"requestId", jobInfo.RunnerRequestId)
|
||||
|
||||
original, err := json.Marshal(&v1alpha1.EphemeralRunner{})
|
||||
if err != nil {
|
||||
@@ -114,10 +113,9 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
|
||||
patch, err := json.Marshal(
|
||||
&v1alpha1.EphemeralRunner{
|
||||
Status: v1alpha1.EphemeralRunnerStatus{
|
||||
JobRequestId: jobInfo.RunnerRequestID,
|
||||
JobRequestId: jobInfo.RunnerRequestId,
|
||||
JobRepositoryName: fmt.Sprintf("%s/%s", jobInfo.OwnerName, jobInfo.RepositoryName),
|
||||
JobID: jobInfo.JobID,
|
||||
WorkflowRunId: jobInfo.WorkflowRunID,
|
||||
WorkflowRunId: jobInfo.WorkflowRunId,
|
||||
JobWorkflowRef: jobInfo.JobWorkflowRef,
|
||||
JobDisplayName: jobInfo.JobDisplayName,
|
||||
},
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.19.0
|
||||
controller-gen.kubebuilder.io/version: v0.17.2
|
||||
name: ephemeralrunners.actions.github.com
|
||||
spec:
|
||||
group: actions.github.com
|
||||
@@ -36,9 +36,6 @@ spec:
|
||||
- jsonPath: .status.jobDisplayName
|
||||
name: JobDisplayName
|
||||
type: string
|
||||
- jsonPath: .status.jobId
|
||||
name: JobId
|
||||
type: string
|
||||
- jsonPath: .status.message
|
||||
name: Message
|
||||
type: string
|
||||
@@ -430,6 +427,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -444,6 +442,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -604,6 +603,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -618,6 +618,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -706,8 +707,8 @@ spec:
|
||||
most preferred is the one with the greatest sum of weights, i.e.
|
||||
for each node that meets all of the scheduling requirements (resource
|
||||
request, requiredDuringScheduling anti-affinity expressions, etc.),
|
||||
compute a sum by iterating through the elements of this field and subtracting
|
||||
"weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
compute a sum by iterating through the elements of this field and adding
|
||||
"weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
node(s) with the highest sum are the most preferred.
|
||||
items:
|
||||
description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
|
||||
@@ -771,6 +772,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -785,6 +787,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -945,6 +948,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -959,6 +963,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -1085,9 +1090,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -1141,42 +1144,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -1232,13 +1199,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -1258,9 +1225,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -1509,12 +1474,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: |-
|
||||
@@ -1905,7 +1864,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -1956,10 +1915,10 @@ spec:
|
||||
restartPolicy:
|
||||
description: |-
|
||||
RestartPolicy defines the restart behavior of individual containers in a pod.
|
||||
This overrides the pod-level restart policy. When this field is not specified,
|
||||
This field may only be set for init containers, and the only allowed value is "Always".
|
||||
For non-init containers or when this field is not specified,
|
||||
the restart behavior is defined by the Pod's restart policy and the container type.
|
||||
Additionally, setting the RestartPolicy as "Always" for the init container will
|
||||
have the following effect:
|
||||
Setting the RestartPolicy as "Always" for the init container will have the following effect:
|
||||
this init container will be continually restarted on
|
||||
exit until all regular containers have terminated. Once all regular
|
||||
containers have completed, all init containers with restartPolicy "Always"
|
||||
@@ -1971,57 +1930,6 @@ spec:
|
||||
init container is started, or after any startupProbe has successfully
|
||||
completed.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. The rules are evaluated in
|
||||
order. Once a rule matches a container exit condition, the remaining
|
||||
rules are ignored. If no rule matches the container exit condition,
|
||||
the Container-level restart policy determines the whether the container
|
||||
is restarted or not. Constraints on the rules:
|
||||
- At most 20 rules are allowed.
|
||||
- Rules can have the same action.
|
||||
- Identical rules are not forbidden in validations.
|
||||
When rules are specified, container MUST set RestartPolicy explicitly
|
||||
even it if matches the Pod's RestartPolicy.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
SecurityContext defines the security options the container should be run with.
|
||||
@@ -2619,9 +2527,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -2675,42 +2581,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -2766,13 +2636,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -2792,9 +2662,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -3039,12 +2907,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: Probes are not allowed for ephemeral containers.
|
||||
@@ -3418,7 +3280,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -3470,51 +3332,9 @@ spec:
|
||||
description: |-
|
||||
Restart policy for the container to manage the restart behavior of each
|
||||
container within a pod.
|
||||
You cannot set this field on ephemeral containers.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. You cannot set this field on
|
||||
This may only be set for init containers. You cannot set this field on
|
||||
ephemeral containers.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
Optional: SecurityContext defines the security options the ephemeral container should be run with.
|
||||
@@ -4033,9 +3853,7 @@ spec:
|
||||
hostNetwork:
|
||||
description: |-
|
||||
Host networking requested for this pod. Use the host's network namespace.
|
||||
When using HostNetwork you should specify ports so the scheduler is aware.
|
||||
When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
|
||||
and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
|
||||
If this option is set, the ports that will be used must be specified.
|
||||
Default to false.
|
||||
type: boolean
|
||||
hostPID:
|
||||
@@ -4060,19 +3878,6 @@ spec:
|
||||
Specifies the hostname of the Pod
|
||||
If not specified, the pod's hostname will be set to a system-defined value.
|
||||
type: string
|
||||
hostnameOverride:
|
||||
description: |-
|
||||
HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
|
||||
This field only specifies the pod's hostname and does not affect its DNS records.
|
||||
When this field is set to a non-empty string:
|
||||
- It takes precedence over the values set in `hostname` and `subdomain`.
|
||||
- The Pod's hostname will be set to this value.
|
||||
- `setHostnameAsFQDN` must be nil or set to false.
|
||||
- `hostNetwork` must be set to false.
|
||||
|
||||
This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
|
||||
Requires the HostnameOverride feature gate to be enabled.
|
||||
type: string
|
||||
imagePullSecrets:
|
||||
description: |-
|
||||
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
|
||||
@@ -4108,7 +3913,7 @@ spec:
|
||||
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
|
||||
The resourceRequirements of an init container are taken into account during scheduling
|
||||
by finding the highest request/limit for each resource type, and then using the max of
|
||||
that value or the sum of the normal containers. Limits are applied to init containers
|
||||
of that value or the sum of the normal containers. Limits are applied to init containers
|
||||
in a similar fashion.
|
||||
Init containers cannot currently be added or removed.
|
||||
Cannot be updated.
|
||||
@@ -4152,9 +3957,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -4208,42 +4011,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -4299,13 +4066,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -4325,9 +4092,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -4576,12 +4341,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: |-
|
||||
@@ -4972,7 +4731,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -5023,10 +4782,10 @@ spec:
|
||||
restartPolicy:
|
||||
description: |-
|
||||
RestartPolicy defines the restart behavior of individual containers in a pod.
|
||||
This overrides the pod-level restart policy. When this field is not specified,
|
||||
This field may only be set for init containers, and the only allowed value is "Always".
|
||||
For non-init containers or when this field is not specified,
|
||||
the restart behavior is defined by the Pod's restart policy and the container type.
|
||||
Additionally, setting the RestartPolicy as "Always" for the init container will
|
||||
have the following effect:
|
||||
Setting the RestartPolicy as "Always" for the init container will have the following effect:
|
||||
this init container will be continually restarted on
|
||||
exit until all regular containers have terminated. Once all regular
|
||||
containers have completed, all init containers with restartPolicy "Always"
|
||||
@@ -5038,57 +4797,6 @@ spec:
|
||||
init container is started, or after any startupProbe has successfully
|
||||
completed.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. The rules are evaluated in
|
||||
order. Once a rule matches a container exit condition, the remaining
|
||||
rules are ignored. If no rule matches the container exit condition,
|
||||
the Container-level restart policy determines the whether the container
|
||||
is restarted or not. Constraints on the rules:
|
||||
- At most 20 rules are allowed.
|
||||
- Rules can have the same action.
|
||||
- Identical rules are not forbidden in validations.
|
||||
When rules are specified, container MUST set RestartPolicy explicitly
|
||||
even it if matches the Pod's RestartPolicy.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
SecurityContext defines the security options the container should be run with.
|
||||
@@ -5602,7 +5310,6 @@ spec:
|
||||
- spec.hostPID
|
||||
- spec.hostIPC
|
||||
- spec.hostUsers
|
||||
- spec.resources
|
||||
- spec.securityContext.appArmorProfile
|
||||
- spec.securityContext.seLinuxOptions
|
||||
- spec.securityContext.seccompProfile
|
||||
@@ -5754,7 +5461,7 @@ spec:
|
||||
description: |-
|
||||
Resources is the total amount of CPU and Memory resources required by all
|
||||
containers in the pod. It supports specifying Requests and Limits for
|
||||
"cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
|
||||
"cpu" and "memory" resource names only. ResourceClaims are not supported.
|
||||
|
||||
This field enables fine-grained control over resource allocation for the
|
||||
entire pod, allowing resource sharing among containers in a pod.
|
||||
@@ -5767,7 +5474,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -6295,6 +6002,7 @@ spec:
|
||||
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
|
||||
|
||||
If this value is nil, the behavior is equivalent to the Honor policy.
|
||||
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
|
||||
type: string
|
||||
nodeTaintsPolicy:
|
||||
description: |-
|
||||
@@ -6305,6 +6013,7 @@ spec:
|
||||
- Ignore: node taints are ignored. All nodes are included.
|
||||
|
||||
If this value is nil, the behavior is equivalent to the Ignore policy.
|
||||
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
|
||||
type: string
|
||||
topologyKey:
|
||||
description: |-
|
||||
@@ -7010,13 +6719,15 @@ spec:
|
||||
volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
|
||||
If specified, the CSI driver will create or update the volume with the attributes defined
|
||||
in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
|
||||
it can be changed after the claim is created. An empty string or nil value indicates that no
|
||||
VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
|
||||
this field can be reset to its previous value (including nil) to cancel the modification.
|
||||
it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
|
||||
will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
|
||||
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
|
||||
will be set by the persistentvolume controller if it exists.
|
||||
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
|
||||
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
|
||||
exists.
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
|
||||
(Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
|
||||
type: string
|
||||
volumeMode:
|
||||
description: |-
|
||||
@@ -7190,9 +6901,12 @@ spec:
|
||||
description: |-
|
||||
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
|
||||
Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
|
||||
More info: https://examples.k8s.io/volumes/glusterfs/README.md
|
||||
properties:
|
||||
endpoints:
|
||||
description: endpoints is the endpoint name that details Glusterfs topology.
|
||||
description: |-
|
||||
endpoints is the endpoint name that details Glusterfs topology.
|
||||
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
|
||||
type: string
|
||||
path:
|
||||
description: |-
|
||||
@@ -7246,7 +6960,7 @@ spec:
|
||||
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
|
||||
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
|
||||
The volume will be mounted read-only (ro) and non-executable files (noexec).
|
||||
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
|
||||
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
|
||||
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
|
||||
properties:
|
||||
pullPolicy:
|
||||
@@ -7271,7 +6985,7 @@ spec:
|
||||
description: |-
|
||||
iscsi represents an ISCSI Disk resource that is attached to a
|
||||
kubelet's host machine and then exposed to the pod.
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
|
||||
More info: https://examples.k8s.io/volumes/iscsi/README.md
|
||||
properties:
|
||||
chapAuthDiscovery:
|
||||
description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
|
||||
@@ -7661,110 +7375,6 @@ spec:
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
podCertificate:
|
||||
description: |-
|
||||
Projects an auto-rotating credential bundle (private key and certificate
|
||||
chain) that the pod can use either as a TLS client or server.
|
||||
|
||||
Kubelet generates a private key and uses it to send a
|
||||
PodCertificateRequest to the named signer. Once the signer approves the
|
||||
request and issues a certificate chain, Kubelet writes the key and
|
||||
certificate chain to the pod filesystem. The pod does not start until
|
||||
certificates have been issued for each podCertificate projected volume
|
||||
source in its spec.
|
||||
|
||||
Kubelet will begin trying to rotate the certificate at the time indicated
|
||||
by the signer using the PodCertificateRequest.Status.BeginRefreshAt
|
||||
timestamp.
|
||||
|
||||
Kubelet can write a single file, indicated by the credentialBundlePath
|
||||
field, or separate files, indicated by the keyPath and
|
||||
certificateChainPath fields.
|
||||
|
||||
The credential bundle is a single file in PEM format. The first PEM
|
||||
entry is the private key (in PKCS#8 format), and the remaining PEM
|
||||
entries are the certificate chain issued by the signer (typically,
|
||||
signers will return their certificate chain in leaf-to-root order).
|
||||
|
||||
Prefer using the credential bundle format, since your application code
|
||||
can read it atomically. If you use keyPath and certificateChainPath,
|
||||
your application must make two separate file reads. If these coincide
|
||||
with a certificate rotation, it is possible that the private key and leaf
|
||||
certificate you read may not correspond to each other. Your application
|
||||
will need to check for this condition, and re-read until they are
|
||||
consistent.
|
||||
|
||||
The named signer controls chooses the format of the certificate it
|
||||
issues; consult the signer implementation's documentation to learn how to
|
||||
use the certificates it issues.
|
||||
properties:
|
||||
certificateChainPath:
|
||||
description: |-
|
||||
Write the certificate chain at this path in the projected volume.
|
||||
|
||||
Most applications should use credentialBundlePath. When using keyPath
|
||||
and certificateChainPath, your application needs to check that the key
|
||||
and leaf certificate are consistent, because it is possible to read the
|
||||
files mid-rotation.
|
||||
type: string
|
||||
credentialBundlePath:
|
||||
description: |-
|
||||
Write the credential bundle at this path in the projected volume.
|
||||
|
||||
The credential bundle is a single file that contains multiple PEM blocks.
|
||||
The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
|
||||
key.
|
||||
|
||||
The remaining blocks are CERTIFICATE blocks, containing the issued
|
||||
certificate chain from the signer (leaf and any intermediates).
|
||||
|
||||
Using credentialBundlePath lets your Pod's application code make a single
|
||||
atomic read that retrieves a consistent key and certificate chain. If you
|
||||
project them to separate files, your application code will need to
|
||||
additionally check that the leaf certificate was issued to the key.
|
||||
type: string
|
||||
keyPath:
|
||||
description: |-
|
||||
Write the key at this path in the projected volume.
|
||||
|
||||
Most applications should use credentialBundlePath. When using keyPath
|
||||
and certificateChainPath, your application needs to check that the key
|
||||
and leaf certificate are consistent, because it is possible to read the
|
||||
files mid-rotation.
|
||||
type: string
|
||||
keyType:
|
||||
description: |-
|
||||
The type of keypair Kubelet will generate for the pod.
|
||||
|
||||
Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
|
||||
"ECDSAP521", and "ED25519".
|
||||
type: string
|
||||
maxExpirationSeconds:
|
||||
description: |-
|
||||
maxExpirationSeconds is the maximum lifetime permitted for the
|
||||
certificate.
|
||||
|
||||
Kubelet copies this value verbatim into the PodCertificateRequests it
|
||||
generates for this projection.
|
||||
|
||||
If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver
|
||||
will reject values shorter than 3600 (1 hour). The maximum allowable
|
||||
value is 7862400 (91 days).
|
||||
|
||||
The signer implementation is then free to issue a certificate with any
|
||||
lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
|
||||
seconds (1 hour). This constraint is enforced by kube-apiserver.
|
||||
`kubernetes.io` signers will never issue certificates with a lifetime
|
||||
longer than 24 hours.
|
||||
format: int32
|
||||
type: integer
|
||||
signerName:
|
||||
description: Kubelet's generated CSRs will be addressed to this signer.
|
||||
type: string
|
||||
required:
|
||||
- keyType
|
||||
- signerName
|
||||
type: object
|
||||
secret:
|
||||
description: secret information about the secret data to project
|
||||
properties:
|
||||
@@ -7894,6 +7504,7 @@ spec:
|
||||
description: |-
|
||||
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
|
||||
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
|
||||
More info: https://examples.k8s.io/volumes/rbd/README.md
|
||||
properties:
|
||||
fsType:
|
||||
description: |-
|
||||
@@ -8173,53 +7784,6 @@ spec:
|
||||
required:
|
||||
- containers
|
||||
type: object
|
||||
vaultConfig:
|
||||
properties:
|
||||
azureKeyVault:
|
||||
properties:
|
||||
certificatePath:
|
||||
type: string
|
||||
clientId:
|
||||
type: string
|
||||
tenantId:
|
||||
type: string
|
||||
url:
|
||||
type: string
|
||||
required:
|
||||
- certificatePath
|
||||
- clientId
|
||||
- tenantId
|
||||
- url
|
||||
type: object
|
||||
proxy:
|
||||
properties:
|
||||
http:
|
||||
properties:
|
||||
credentialSecretRef:
|
||||
type: string
|
||||
url:
|
||||
description: Required
|
||||
type: string
|
||||
type: object
|
||||
https:
|
||||
properties:
|
||||
credentialSecretRef:
|
||||
type: string
|
||||
url:
|
||||
description: Required
|
||||
type: string
|
||||
type: object
|
||||
noProxy:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
type:
|
||||
description: |-
|
||||
VaultType represents the type of vault that can be used in the application.
|
||||
It is used to identify which vault integration should be used to resolve secrets.
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- githubConfigSecret
|
||||
- githubConfigUrl
|
||||
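The vaultConfig block shown in the hunk above has the shape sketched below. Only the field names come from the CRD; the placement inside the resource spec, the type identifier, and every value are assumptions for illustration.

```yaml
# fragment of an actions.github.com resource spec (all values are placeholders)
vaultConfig:
  type: azure_key_vault          # assumed identifier; the CRD only types this as a string
  azureKeyVault:
    url: https://my-vault.vault.azure.net
    tenantId: 00000000-0000-0000-0000-000000000000
    clientId: 11111111-1111-1111-1111-111111111111
    certificatePath: /etc/azure/cert.pem
  proxy:
    https:
      url: https://proxy.internal:8443   # description marks url as Required
      credentialSecretRef: proxy-credentials
    noProxy:
      - vault.azure.net
```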
@@ -8230,13 +7794,10 @@ spec:
|
||||
properties:
|
||||
failures:
|
||||
additionalProperties:
|
||||
format: date-time
|
||||
type: string
|
||||
type: boolean
|
||||
type: object
|
||||
jobDisplayName:
|
||||
type: string
|
||||
jobId:
|
||||
type: string
|
||||
jobRepositoryName:
|
||||
type: string
|
||||
jobRequestId:
|
||||
@@ -8265,6 +7826,8 @@ spec:
|
||||
type: string
|
||||
runnerId:
|
||||
type: integer
|
||||
runnerJITConfig:
|
||||
type: string
|
||||
runnerName:
|
||||
type: string
|
||||
workflowRunId:
|
||||
@@ -8276,3 +7839,4 @@ spec:
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
preserveUnknownFields: false
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.19.0
|
||||
controller-gen.kubebuilder.io/version: v0.17.2
|
||||
name: ephemeralrunnersets.actions.github.com
|
||||
spec:
|
||||
group: actions.github.com
|
||||
@@ -421,6 +421,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -435,6 +436,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -595,6 +597,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -609,6 +612,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -697,8 +701,8 @@ spec:
|
||||
most preferred is the one with the greatest sum of weights, i.e.
|
||||
for each node that meets all of the scheduling requirements (resource
|
||||
request, requiredDuringScheduling anti-affinity expressions, etc.),
|
||||
compute a sum by iterating through the elements of this field and subtracting
|
||||
"weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
compute a sum by iterating through the elements of this field and adding
|
||||
"weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
node(s) with the highest sum are the most preferred.
|
||||
items:
|
||||
description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
|
||||
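The weighted-affinity scoring described in this hunk (per node, sum the weights of the terms whose pods match, then prefer the node with the best sum) is driven by entries like the following sketch; the label, weight, and use of matchLabelKeys are placeholders.

```yaml
affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100                       # contributes to the per-node sum when the term matches
        podAffinityTerm:
          topologyKey: kubernetes.io/hostname
          labelSelector:
            matchLabels:
              app: runner                 # placeholder label
          # the beta field discussed in the surrounding hunks
          matchLabelKeys:
            - pod-template-hash
```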
@@ -762,6 +766,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -776,6 +781,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -936,6 +942,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -950,6 +957,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -1076,9 +1084,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -1132,42 +1138,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
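The fileKeyRef source documented in the removed lines above would be wired into a container roughly as below, assuming the alpha EnvFiles feature gate is enabled and a volume named app-config carries the env file; the variable, volume, and file names are placeholders.

```yaml
env:
  - name: DATABASE_URL                  # placeholder variable name
    valueFrom:
      fileKeyRef:
        volumeName: app-config          # volume mounted into the pod that holds the env file
        path: config.env                # relative path, must not contain '..'
        key: DATABASE_URL               # key inside the env file
        optional: false                 # fail pod creation if the key is missing
```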
@@ -1223,13 +1193,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -1249,9 +1219,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -1500,12 +1468,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: |-
|
||||
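The stopSignal lifecycle field mentioned above could look like this minimal sketch; it only applies when spec.os.name is set, and the image is a placeholder.

```yaml
spec:
  os:
    name: linux                 # stopSignal may only be set when spec.os.name is non-empty
  containers:
    - name: app
      image: example.registry.local/app:latest   # placeholder image
      lifecycle:
        stopSignal: SIGUSR1     # overrides the signal the runtime would otherwise send
```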
@@ -1896,7 +1858,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -1947,10 +1909,10 @@ spec:
|
||||
restartPolicy:
|
||||
description: |-
|
||||
RestartPolicy defines the restart behavior of individual containers in a pod.
|
||||
This overrides the pod-level restart policy. When this field is not specified,
|
||||
This field may only be set for init containers, and the only allowed value is "Always".
|
||||
For non-init containers or when this field is not specified,
|
||||
the restart behavior is defined by the Pod's restart policy and the container type.
|
||||
Additionally, setting the RestartPolicy as "Always" for the init container will
|
||||
have the following effect:
|
||||
Setting the RestartPolicy as "Always" for the init container will have the following effect:
|
||||
this init container will be continually restarted on
|
||||
exit until all regular containers have terminated. Once all regular
|
||||
containers have completed, all init containers with restartPolicy "Always"
|
||||
@@ -1962,57 +1924,6 @@ spec:
|
||||
init container is started, or after any startupProbe has successfully
|
||||
completed.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. The rules are evaluated in
|
||||
order. Once a rule matches a container exit condition, the remaining
|
||||
rules are ignored. If no rule matches the container exit condition,
|
||||
the Container-level restart policy determines whether the container
|
||||
is restarted or not. Constraints on the rules:
|
||||
- At most 20 rules are allowed.
|
||||
- Rules can have the same action.
|
||||
- Identical rules are not forbidden in validations.
|
||||
When rules are specified, container MUST set RestartPolicy explicitly
|
||||
even if it matches the Pod's RestartPolicy.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
SecurityContext defines the security options the container should be run with.
|
||||
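The restartPolicyRules block removed in this hunk describes per-exit-code restart handling. A sketch, assuming a cluster where the alpha container restart rules feature is enabled; note the constraint quoted above that restartPolicy must then be set explicitly on the container.

```yaml
containers:
  - name: worker
    image: example.registry.local/worker:latest   # placeholder image
    restartPolicy: Never          # must be set explicitly when rules are present
    restartPolicyRules:
      - action: Restart           # "Restart" is the only possible action
        exitCodes:
          operator: In
          values: [42]            # restart only on this exit code
```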
@@ -2610,9 +2521,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -2666,42 +2575,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -2757,13 +2630,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -2783,9 +2656,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -3030,12 +2901,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: Probes are not allowed for ephemeral containers.
|
||||
@@ -3409,7 +3274,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -3461,51 +3326,9 @@ spec:
|
||||
description: |-
|
||||
Restart policy for the container to manage the restart behavior of each
|
||||
container within a pod.
|
||||
You cannot set this field on ephemeral containers.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. You cannot set this field on
|
||||
This may only be set for init containers. You cannot set this field on
|
||||
ephemeral containers.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
Optional: SecurityContext defines the security options the ephemeral container should be run with.
|
||||
@@ -4024,9 +3847,7 @@ spec:
|
||||
hostNetwork:
|
||||
description: |-
|
||||
Host networking requested for this pod. Use the host's network namespace.
|
||||
When using HostNetwork you should specify ports so the scheduler is aware.
|
||||
When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
|
||||
and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
|
||||
If this option is set, the ports that will be used must be specified.
|
||||
Default to false.
|
||||
type: boolean
|
||||
hostPID:
|
||||
@@ -4051,19 +3872,6 @@ spec:
|
||||
Specifies the hostname of the Pod
|
||||
If not specified, the pod's hostname will be set to a system-defined value.
|
||||
type: string
|
||||
hostnameOverride:
|
||||
description: |-
|
||||
HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
|
||||
This field only specifies the pod's hostname and does not affect its DNS records.
|
||||
When this field is set to a non-empty string:
|
||||
- It takes precedence over the values set in `hostname` and `subdomain`.
|
||||
- The Pod's hostname will be set to this value.
|
||||
- `setHostnameAsFQDN` must be nil or set to false.
|
||||
- `hostNetwork` must be set to false.
|
||||
|
||||
This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
|
||||
Requires the HostnameOverride feature gate to be enabled.
|
||||
type: string
|
||||
imagePullSecrets:
|
||||
description: |-
|
||||
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
|
||||
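The hostnameOverride field documented above carries several constraints: hostNetwork must be false, setHostnameAsFQDN must be nil or false, the value must be an RFC 1123 subdomain of at most 64 characters, and the HostnameOverride feature gate must be enabled. A minimal sketch with placeholder names:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: hostname-override-demo            # hypothetical name
spec:
  hostNetwork: false                       # required to be false for this field
  setHostnameAsFQDN: false                 # must be nil or false
  hostnameOverride: runner-custom-host.example.internal   # <= 64 chars, RFC 1123 subdomain
  containers:
    - name: app
      image: example.registry.local/app:latest   # placeholder image
```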
@@ -4099,7 +3907,7 @@ spec:
|
||||
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
|
||||
The resourceRequirements of an init container are taken into account during scheduling
|
||||
by finding the highest request/limit for each resource type, and then using the max of
|
||||
that value or the sum of the normal containers. Limits are applied to init containers
|
||||
of that value or the sum of the normal containers. Limits are applied to init containers
|
||||
in a similar fashion.
|
||||
Init containers cannot currently be added or removed.
|
||||
Cannot be updated.
|
||||
@@ -4143,9 +3951,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -4199,42 +4005,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -4290,13 +4060,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -4316,9 +4086,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -4567,12 +4335,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: |-
|
||||
@@ -4963,7 +4725,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -5014,10 +4776,10 @@ spec:
|
||||
restartPolicy:
|
||||
description: |-
|
||||
RestartPolicy defines the restart behavior of individual containers in a pod.
|
||||
This overrides the pod-level restart policy. When this field is not specified,
|
||||
This field may only be set for init containers, and the only allowed value is "Always".
|
||||
For non-init containers or when this field is not specified,
|
||||
the restart behavior is defined by the Pod's restart policy and the container type.
|
||||
Additionally, setting the RestartPolicy as "Always" for the init container will
|
||||
have the following effect:
|
||||
Setting the RestartPolicy as "Always" for the init container will have the following effect:
|
||||
this init container will be continually restarted on
|
||||
exit until all regular containers have terminated. Once all regular
|
||||
containers have completed, all init containers with restartPolicy "Always"
|
||||
@@ -5029,57 +4791,6 @@ spec:
|
||||
init container is started, or after any startupProbe has successfully
|
||||
completed.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. The rules are evaluated in
|
||||
order. Once a rule matches a container exit condition, the remaining
|
||||
rules are ignored. If no rule matches the container exit condition,
|
||||
the Container-level restart policy determines whether the container
|
||||
is restarted or not. Constraints on the rules:
|
||||
- At most 20 rules are allowed.
|
||||
- Rules can have the same action.
|
||||
- Identical rules are not forbidden in validations.
|
||||
When rules are specified, container MUST set RestartPolicy explicitly
|
||||
even if it matches the Pod's RestartPolicy.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
SecurityContext defines the security options the container should be run with.
|
||||
@@ -5593,7 +5304,6 @@ spec:
|
||||
- spec.hostPID
|
||||
- spec.hostIPC
|
||||
- spec.hostUsers
|
||||
- spec.resources
|
||||
- spec.securityContext.appArmorProfile
|
||||
- spec.securityContext.seLinuxOptions
|
||||
- spec.securityContext.seccompProfile
|
||||
@@ -5745,7 +5455,7 @@ spec:
|
||||
description: |-
|
||||
Resources is the total amount of CPU and Memory resources required by all
|
||||
containers in the pod. It supports specifying Requests and Limits for
|
||||
"cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
|
||||
"cpu" and "memory" resource names only. ResourceClaims are not supported.
|
||||
|
||||
This field enables fine-grained control over resource allocation for the
|
||||
entire pod, allowing resource sharing among containers in a pod.
|
||||
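The pod-level resources field discussed in this hunk aggregates requests and limits for the whole pod so containers can share the budget. A sketch with placeholder values; only cpu and memory (plus hugepages-* in the newer schema text) are accepted at this level, and the feature gate must be enabled.

```yaml
spec:
  resources:                    # pod-level totals shared by all containers
    requests:
      cpu: "1"
      memory: 1Gi
    limits:
      cpu: "2"
      memory: 2Gi
  containers:
    - name: runner
      image: example.registry.local/runner:latest   # placeholder image
```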
@@ -5758,7 +5468,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -6286,6 +5996,7 @@ spec:
|
||||
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
|
||||
|
||||
If this value is nil, the behavior is equivalent to the Honor policy.
|
||||
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
|
||||
type: string
|
||||
nodeTaintsPolicy:
|
||||
description: |-
|
||||
@@ -6296,6 +6007,7 @@ spec:
|
||||
- Ignore: node taints are ignored. All nodes are included.
|
||||
|
||||
If this value is nil, the behavior is equivalent to the Ignore policy.
|
||||
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
|
||||
type: string
|
||||
topologyKey:
|
||||
description: |-
|
||||
@@ -7001,13 +6713,15 @@ spec:
|
||||
volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
|
||||
If specified, the CSI driver will create or update the volume with the attributes defined
|
||||
in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
|
||||
it can be changed after the claim is created. An empty string or nil value indicates that no
|
||||
VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
|
||||
this field can be reset to its previous value (including nil) to cancel the modification.
|
||||
it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
|
||||
will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
|
||||
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
|
||||
will be set by the persistentvolume controller if it exists.
|
||||
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
|
||||
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
|
||||
exists.
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
|
||||
(Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
|
||||
type: string
|
||||
volumeMode:
|
||||
description: |-
|
||||
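The volumeAttributesClassName behaviour described above can be seen in a plain PersistentVolumeClaim; the class names here are placeholders and the field requires the VolumeAttributesClass feature gate plus a CSI driver that honours it.

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: work-volume                      # hypothetical name
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 10Gi
  storageClassName: fast-ssd             # placeholder storage class
  volumeAttributesClassName: gold        # unlike storageClassName, can be changed after creation
```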
@@ -7181,9 +6895,12 @@ spec:
|
||||
description: |-
|
||||
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
|
||||
Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
|
||||
More info: https://examples.k8s.io/volumes/glusterfs/README.md
|
||||
properties:
|
||||
endpoints:
|
||||
description: endpoints is the endpoint name that details Glusterfs topology.
|
||||
description: |-
|
||||
endpoints is the endpoint name that details Glusterfs topology.
|
||||
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
|
||||
type: string
|
||||
path:
|
||||
description: |-
|
||||
@@ -7237,7 +6954,7 @@ spec:
|
||||
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
|
||||
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
|
||||
The volume will be mounted read-only (ro) and non-executable files (noexec).
|
||||
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
|
||||
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
|
||||
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
|
||||
properties:
|
||||
pullPolicy:
|
||||
@@ -7262,7 +6979,7 @@ spec:
|
||||
description: |-
|
||||
iscsi represents an ISCSI Disk resource that is attached to a
|
||||
kubelet's host machine and then exposed to the pod.
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
|
||||
More info: https://examples.k8s.io/volumes/iscsi/README.md
|
||||
properties:
|
||||
chapAuthDiscovery:
|
||||
description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
|
||||
@@ -7652,110 +7369,6 @@ spec:
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
podCertificate:
|
||||
description: |-
|
||||
Projects an auto-rotating credential bundle (private key and certificate
|
||||
chain) that the pod can use either as a TLS client or server.
|
||||
|
||||
Kubelet generates a private key and uses it to send a
|
||||
PodCertificateRequest to the named signer. Once the signer approves the
|
||||
request and issues a certificate chain, Kubelet writes the key and
|
||||
certificate chain to the pod filesystem. The pod does not start until
|
||||
certificates have been issued for each podCertificate projected volume
|
||||
source in its spec.
|
||||
|
||||
Kubelet will begin trying to rotate the certificate at the time indicated
|
||||
by the signer using the PodCertificateRequest.Status.BeginRefreshAt
|
||||
timestamp.
|
||||
|
||||
Kubelet can write a single file, indicated by the credentialBundlePath
|
||||
field, or separate files, indicated by the keyPath and
|
||||
certificateChainPath fields.
|
||||
|
||||
The credential bundle is a single file in PEM format. The first PEM
|
||||
entry is the private key (in PKCS#8 format), and the remaining PEM
|
||||
entries are the certificate chain issued by the signer (typically,
|
||||
signers will return their certificate chain in leaf-to-root order).
|
||||
|
||||
Prefer using the credential bundle format, since your application code
|
||||
can read it atomically. If you use keyPath and certificateChainPath,
|
||||
your application must make two separate file reads. If these coincide
|
||||
with a certificate rotation, it is possible that the private key and leaf
|
||||
certificate you read may not correspond to each other. Your application
|
||||
will need to check for this condition, and re-read until they are
|
||||
consistent.
|
||||
|
||||
The named signer chooses the format of the certificate it
|
||||
issues; consult the signer implementation's documentation to learn how to
|
||||
use the certificates it issues.
|
||||
properties:
|
||||
certificateChainPath:
|
||||
description: |-
|
||||
Write the certificate chain at this path in the projected volume.
|
||||
|
||||
Most applications should use credentialBundlePath. When using keyPath
|
||||
and certificateChainPath, your application needs to check that the key
|
||||
and leaf certificate are consistent, because it is possible to read the
|
||||
files mid-rotation.
|
||||
type: string
|
||||
credentialBundlePath:
|
||||
description: |-
|
||||
Write the credential bundle at this path in the projected volume.
|
||||
|
||||
The credential bundle is a single file that contains multiple PEM blocks.
|
||||
The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
|
||||
key.
|
||||
|
||||
The remaining blocks are CERTIFICATE blocks, containing the issued
|
||||
certificate chain from the signer (leaf and any intermediates).
|
||||
|
||||
Using credentialBundlePath lets your Pod's application code make a single
|
||||
atomic read that retrieves a consistent key and certificate chain. If you
|
||||
project them to separate files, your application code will need to
|
||||
additionally check that the leaf certificate was issued to the key.
|
||||
type: string
|
||||
keyPath:
|
||||
description: |-
|
||||
Write the key at this path in the projected volume.
|
||||
|
||||
Most applications should use credentialBundlePath. When using keyPath
|
||||
and certificateChainPath, your application needs to check that the key
|
||||
and leaf certificate are consistent, because it is possible to read the
|
||||
files mid-rotation.
|
||||
type: string
|
||||
keyType:
|
||||
description: |-
|
||||
The type of keypair Kubelet will generate for the pod.
|
||||
|
||||
Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
|
||||
"ECDSAP521", and "ED25519".
|
||||
type: string
|
||||
maxExpirationSeconds:
|
||||
description: |-
|
||||
maxExpirationSeconds is the maximum lifetime permitted for the
|
||||
certificate.
|
||||
|
||||
Kubelet copies this value verbatim into the PodCertificateRequests it
|
||||
generates for this projection.
|
||||
|
||||
If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
|
||||
will reject values shorter than 3600 (1 hour). The maximum allowable
|
||||
value is 7862400 (91 days).
|
||||
|
||||
The signer implementation is then free to issue a certificate with any
|
||||
lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
|
||||
seconds (1 hour). This constraint is enforced by kube-apiserver.
|
||||
`kubernetes.io` signers will never issue certificates with a lifetime
|
||||
longer than 24 hours.
|
||||
format: int32
|
||||
type: integer
|
||||
signerName:
|
||||
description: Kubelet's generated CSRs will be addressed to this signer.
|
||||
type: string
|
||||
required:
|
||||
- keyType
|
||||
- signerName
|
||||
type: object
|
||||
secret:
|
||||
description: secret information about the secret data to project
|
||||
properties:
|
||||
@@ -7885,6 +7498,7 @@ spec:
|
||||
description: |-
|
||||
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
|
||||
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
|
||||
More info: https://examples.k8s.io/volumes/rbd/README.md
|
||||
properties:
|
||||
fsType:
|
||||
description: |-
|
||||
@@ -8164,53 +7778,6 @@ spec:
|
||||
required:
|
||||
- containers
|
||||
type: object
|
||||
vaultConfig:
|
||||
properties:
|
||||
azureKeyVault:
|
||||
properties:
|
||||
certificatePath:
|
||||
type: string
|
||||
clientId:
|
||||
type: string
|
||||
tenantId:
|
||||
type: string
|
||||
url:
|
||||
type: string
|
||||
required:
|
||||
- certificatePath
|
||||
- clientId
|
||||
- tenantId
|
||||
- url
|
||||
type: object
|
||||
proxy:
|
||||
properties:
|
||||
http:
|
||||
properties:
|
||||
credentialSecretRef:
|
||||
type: string
|
||||
url:
|
||||
description: Required
|
||||
type: string
|
||||
type: object
|
||||
https:
|
||||
properties:
|
||||
credentialSecretRef:
|
||||
type: string
|
||||
url:
|
||||
description: Required
|
||||
type: string
|
||||
type: object
|
||||
noProxy:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
type:
|
||||
description: |-
|
||||
VaultType represents the type of vault that can be used in the application.
|
||||
It is used to identify which vault integration should be used to resolve secrets.
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- githubConfigSecret
|
||||
- githubConfigUrl
|
||||
@@ -8245,3 +7812,4 @@ spec:
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
preserveUnknownFields: false
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.19.0
|
||||
controller-gen.kubebuilder.io/version: v0.17.2
|
||||
name: horizontalrunnerautoscalers.actions.summerwind.dev
|
||||
spec:
|
||||
group: actions.summerwind.dev
|
||||
@@ -32,8 +32,7 @@ spec:
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
|
||||
API
|
||||
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
@@ -53,8 +52,7 @@ spec:
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: HorizontalRunnerAutoscalerSpec defines the desired state
|
||||
of HorizontalRunnerAutoscaler
|
||||
description: HorizontalRunnerAutoscalerSpec defines the desired state of HorizontalRunnerAutoscaler
|
||||
properties:
|
||||
capacityReservations:
|
||||
items:
|
||||
@@ -85,12 +83,10 @@ spec:
|
||||
type: object
|
||||
type: object
|
||||
maxReplicas:
|
||||
description: MaxReplicas is the maximum number of replicas the deployment
|
||||
is allowed to scale
|
||||
description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
|
||||
type: integer
|
||||
metrics:
|
||||
description: Metrics is the collection of various metric targets to
|
||||
calculate desired number of runners
|
||||
description: Metrics is the collection of various metric targets to calculate desired number of runners
|
||||
items:
|
||||
properties:
|
||||
repositoryNames:
|
||||
@@ -138,8 +134,7 @@ spec:
|
||||
type: object
|
||||
type: array
|
||||
minReplicas:
|
||||
description: MinReplicas is the minimum number of replicas the deployment
|
||||
is allowed to scale
|
||||
description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
|
||||
type: integer
|
||||
scaleDownDelaySecondsAfterScaleOut:
|
||||
description: |-
|
||||
@@ -147,8 +142,7 @@ spec:
|
||||
Used to prevent flapping (down->up->down->... loop)
|
||||
type: integer
|
||||
scaleTargetRef:
|
||||
description: ScaleTargetRef is the reference to scaled resource like
|
||||
RunnerDeployment
|
||||
description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
|
||||
properties:
|
||||
kind:
|
||||
description: Kind is the type of resource being referenced
|
||||
@@ -240,8 +234,7 @@ spec:
|
||||
A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
|
||||
properties:
|
||||
endTime:
|
||||
description: EndTime is the time at which the first override
|
||||
ends.
|
||||
description: EndTime is the time at which the first override ends.
|
||||
format: date-time
|
||||
type: string
|
||||
minReplicas:
|
||||
@@ -272,8 +265,7 @@ spec:
|
||||
type: string
|
||||
type: object
|
||||
startTime:
|
||||
description: StartTime is the time at which the first override
|
||||
starts.
|
||||
description: StartTime is the time at which the first override starts.
|
||||
format: date-time
|
||||
type: string
|
||||
required:
|
||||
@@ -322,3 +314,4 @@ spec:
storage: true
subresources:
status: {}
preserveUnknownFields: false

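Since the hunks above only show fragments of the HorizontalRunnerAutoscaler schema, a small end-to-end manifest may help tie the fields together; the target RunnerDeployment, repository, and numbers are illustrative, not taken from this diff.

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-autoscaler               # hypothetical name
spec:
  scaleTargetRef:
    kind: RunnerDeployment                # the scaled resource referenced above
    name: example-runnerdeploy            # assumed to exist
  minReplicas: 1
  maxReplicas: 10
  scaleDownDelaySecondsAfterScaleOut: 300 # anti-flapping delay described in the spec
  metrics:
    - type: TotalNumberOfQueuedAndInProgressWorkflowRuns
      repositoryNames:
        - my-org/my-repo                  # placeholder repository
  scheduledOverrides:
    - startTime: "2024-01-01T00:00:00Z"
      endTime: "2024-01-01T06:00:00Z"
      minReplicas: 0                      # scale to zero during the override window
```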
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.19.0
|
||||
controller-gen.kubebuilder.io/version: v0.17.2
|
||||
name: runnersets.actions.summerwind.dev
|
||||
spec:
|
||||
group: actions.summerwind.dev
|
||||
@@ -554,6 +554,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -568,6 +569,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -728,6 +730,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -742,6 +745,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -830,8 +834,8 @@ spec:
|
||||
most preferred is the one with the greatest sum of weights, i.e.
|
||||
for each node that meets all of the scheduling requirements (resource
|
||||
request, requiredDuringScheduling anti-affinity expressions, etc.),
|
||||
compute a sum by iterating through the elements of this field and subtracting
|
||||
"weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
compute a sum by iterating through the elements of this field and adding
|
||||
"weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
node(s) with the highest sum are the most preferred.
|
||||
items:
|
||||
description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
|
||||
@@ -895,6 +899,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -909,6 +914,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -1069,6 +1075,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -1083,6 +1090,7 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -1209,9 +1217,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -1265,42 +1271,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -1356,13 +1326,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -1382,9 +1352,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -1633,12 +1601,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: |-
|
||||
@@ -2029,7 +1991,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -2080,10 +2042,10 @@ spec:
|
||||
restartPolicy:
|
||||
description: |-
|
||||
RestartPolicy defines the restart behavior of individual containers in a pod.
|
||||
This overrides the pod-level restart policy. When this field is not specified,
|
||||
This field may only be set for init containers, and the only allowed value is "Always".
|
||||
For non-init containers or when this field is not specified,
|
||||
the restart behavior is defined by the Pod's restart policy and the container type.
|
||||
Additionally, setting the RestartPolicy as "Always" for the init container will
|
||||
have the following effect:
|
||||
Setting the RestartPolicy as "Always" for the init container will have the following effect:
|
||||
this init container will be continually restarted on
|
||||
exit until all regular containers have terminated. Once all regular
|
||||
containers have completed, all init containers with restartPolicy "Always"
|
||||
@@ -2095,57 +2057,6 @@ spec:
|
||||
init container is started, or after any startupProbe has successfully
|
||||
completed.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. The rules are evaluated in
|
||||
order. Once a rule matches a container exit condition, the remaining
|
||||
rules are ignored. If no rule matches the container exit condition,
|
||||
the Container-level restart policy determines whether the container
|
||||
is restarted or not. Constraints on the rules:
|
||||
- At most 20 rules are allowed.
|
||||
- Rules can have the same action.
|
||||
- Identical rules are not forbidden in validations.
|
||||
When rules are specified, container MUST set RestartPolicy explicitly
|
||||
even if it matches the Pod's RestartPolicy.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
SecurityContext defines the security options the container should be run with.
|
||||
@@ -2743,9 +2654,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -2799,42 +2708,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -2890,13 +2763,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -2916,9 +2789,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
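A minimal Go sketch (not part of the diff) of the prefix/configMapRef fields described above; the ConfigMap name and the prefix are hypothetical:

// Illustrative only: populate container environment variables from a ConfigMap,
// prepending a prefix to every key as described above.
package example

import corev1 "k8s.io/api/core/v1"

func envFromWithPrefix() corev1.EnvFromSource {
	optional := true
	return corev1.EnvFromSource{
		Prefix: "RUNNER_", // prepended to each key taken from the ConfigMap
		ConfigMapRef: &corev1.ConfigMapEnvSource{
			LocalObjectReference: corev1.LocalObjectReference{Name: "runner-env"},
			Optional:             &optional,
		},
	}
}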
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -3163,12 +3034,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: Probes are not allowed for ephemeral containers.
|
||||
@@ -3542,7 +3407,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -3594,51 +3459,9 @@ spec:
|
||||
description: |-
|
||||
Restart policy for the container to manage the restart behavior of each
|
||||
container within a pod.
|
||||
You cannot set this field on ephemeral containers.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. You cannot set this field on
|
||||
This may only be set for init containers. You cannot set this field on
|
||||
ephemeral containers.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
Optional: SecurityContext defines the security options the ephemeral container should be run with.
|
||||
@@ -4157,9 +3980,7 @@ spec:
|
||||
hostNetwork:
|
||||
description: |-
|
||||
Host networking requested for this pod. Use the host's network namespace.
|
||||
When using HostNetwork you should specify ports so the scheduler is aware.
|
||||
When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
|
||||
and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
|
||||
If this option is set, the ports that will be used must be specified.
|
||||
Default to false.
|
||||
type: boolean
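A minimal Go sketch (not part of the diff) of the hostNetwork behavior described above; the container name, image, and port are hypothetical:

// Illustrative only: when HostNetwork is true, ports should be declared and
// hostPort must match containerPort, as the description above notes.
package example

import corev1 "k8s.io/api/core/v1"

func hostNetworkPodSpec() corev1.PodSpec {
	return corev1.PodSpec{
		HostNetwork: true,
		Containers: []corev1.Container{{
			Name:  "metrics",
			Image: "example.com/metrics:latest",
			Ports: []corev1.ContainerPort{{
				ContainerPort: 9100,
				HostPort:      9100, // must equal ContainerPort on a host-network pod
			}},
		}},
	}
}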
hostPID:
|
||||
@@ -4184,19 +4005,6 @@ spec:
|
||||
Specifies the hostname of the Pod
|
||||
If not specified, the pod's hostname will be set to a system-defined value.
|
||||
type: string
|
||||
hostnameOverride:
|
||||
description: |-
|
||||
HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
|
||||
This field only specifies the pod's hostname and does not affect its DNS records.
|
||||
When this field is set to a non-empty string:
|
||||
- It takes precedence over the values set in `hostname` and `subdomain`.
|
||||
- The Pod's hostname will be set to this value.
|
||||
- `setHostnameAsFQDN` must be nil or set to false.
|
||||
- `hostNetwork` must be set to false.
|
||||
|
||||
This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
|
||||
Requires the HostnameOverride feature gate to be enabled.
|
||||
type: string
|
||||
imagePullSecrets:
|
||||
description: |-
|
||||
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
|
||||
@@ -4232,7 +4040,7 @@ spec:
|
||||
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
|
||||
The resourceRequirements of an init container are taken into account during scheduling
|
||||
by finding the highest request/limit for each resource type, and then using the max of
|
||||
that value or the sum of the normal containers. Limits are applied to init containers
|
||||
of that value or the sum of the normal containers. Limits are applied to init containers
|
||||
in a similar fashion.
|
||||
Init containers cannot currently be added or removed.
|
||||
Cannot be updated.
|
||||
@@ -4276,9 +4084,7 @@ spec:
|
||||
description: EnvVar represents an environment variable present in a Container.
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
@@ -4332,42 +4138,6 @@ spec:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fileKeyRef:
|
||||
description: |-
|
||||
FileKeyRef selects a key of the env file.
|
||||
Requires the EnvFiles feature gate to be enabled.
|
||||
properties:
|
||||
key:
|
||||
description: |-
|
||||
The key within the env file. An invalid key will prevent the pod from starting.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
|
||||
type: string
|
||||
optional:
|
||||
default: false
|
||||
description: |-
|
||||
Specify whether the file or its key must be defined. If the file or key
|
||||
does not exist, then the env var is not published.
|
||||
If optional is set to true and the specified key does not exist,
|
||||
the environment variable will not be set in the Pod's containers.
|
||||
|
||||
If optional is set to false and the specified key does not exist,
|
||||
an error will be returned during Pod creation.
|
||||
type: boolean
|
||||
path:
|
||||
description: |-
|
||||
The path within the volume from which to select the file.
|
||||
Must be relative and may not contain the '..' path or start with '..'.
|
||||
type: string
|
||||
volumeName:
|
||||
description: The name of the volume mount containing the env file.
|
||||
type: string
|
||||
required:
|
||||
- key
|
||||
- path
|
||||
- volumeName
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
@@ -4423,13 +4193,13 @@ spec:
|
||||
envFrom:
|
||||
description: |-
|
||||
List of sources to populate environment variables in the container.
|
||||
The keys defined within a source may consist of any printable ASCII characters except '='.
|
||||
When a key exists in multiple
|
||||
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
will be reported as an event when the container is starting. When a key exists in multiple
|
||||
sources, the value associated with the last source will take precedence.
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -4449,9 +4219,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: |-
|
||||
Optional text to prepend to the name of each environment variable.
|
||||
May consist of any printable ASCII characters except '='.
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -4700,12 +4468,6 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: |-
|
||||
@@ -5096,7 +4858,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -5147,10 +4909,10 @@ spec:
|
||||
restartPolicy:
|
||||
description: |-
|
||||
RestartPolicy defines the restart behavior of individual containers in a pod.
|
||||
This overrides the pod-level restart policy. When this field is not specified,
|
||||
This field may only be set for init containers, and the only allowed value is "Always".
|
||||
For non-init containers or when this field is not specified,
|
||||
the restart behavior is defined by the Pod's restart policy and the container type.
|
||||
Additionally, setting the RestartPolicy as "Always" for the init container will
|
||||
have the following effect:
|
||||
Setting the RestartPolicy as "Always" for the init container will have the following effect:
|
||||
this init container will be continually restarted on
|
||||
exit until all regular containers have terminated. Once all regular
|
||||
containers have completed, all init containers with restartPolicy "Always"
|
||||
@@ -5162,57 +4924,6 @@ spec:
|
||||
init container is started, or after any startupProbe has successfully
|
||||
completed.
|
||||
type: string
|
||||
restartPolicyRules:
|
||||
description: |-
|
||||
Represents a list of rules to be checked to determine if the
|
||||
container should be restarted on exit. The rules are evaluated in
|
||||
order. Once a rule matches a container exit condition, the remaining
|
||||
rules are ignored. If no rule matches the container exit condition,
|
||||
the Container-level restart policy determines whether the container
|
||||
is restarted or not. Constraints on the rules:
|
||||
- At most 20 rules are allowed.
|
||||
- Rules can have the same action.
|
||||
- Identical rules are not forbidden in validations.
|
||||
When rules are specified, container MUST set RestartPolicy explicitly
|
||||
even if it matches the Pod's RestartPolicy.
|
||||
items:
|
||||
description: ContainerRestartRule describes how a container exit is handled.
|
||||
properties:
|
||||
action:
|
||||
description: |-
|
||||
Specifies the action taken on a container exit if the requirements
|
||||
are satisfied. The only possible value is "Restart" to restart the
|
||||
container.
|
||||
type: string
|
||||
exitCodes:
|
||||
description: Represents the exit codes to check on container exits.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents the relationship between the container exit code(s) and the
|
||||
specified values. Possible values are:
|
||||
- In: the requirement is satisfied if the container exit code is in the
|
||||
set of specified values.
|
||||
- NotIn: the requirement is satisfied if the container exit code is
|
||||
not in the set of specified values.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
Specifies the set of values to check for container exit codes.
|
||||
At most 255 elements are allowed.
|
||||
items:
|
||||
format: int32
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: set
|
||||
required:
|
||||
- operator
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
securityContext:
|
||||
description: |-
|
||||
SecurityContext defines the security options the container should be run with.
|
||||
@@ -5726,7 +5437,6 @@ spec:
|
||||
- spec.hostPID
|
||||
- spec.hostIPC
|
||||
- spec.hostUsers
|
||||
- spec.resources
|
||||
- spec.securityContext.appArmorProfile
|
||||
- spec.securityContext.seLinuxOptions
|
||||
- spec.securityContext.seccompProfile
|
||||
@@ -5878,7 +5588,7 @@ spec:
|
||||
description: |-
|
||||
Resources is the total amount of CPU and Memory resources required by all
|
||||
containers in the pod. It supports specifying Requests and Limits for
|
||||
"cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
|
||||
"cpu" and "memory" resource names only. ResourceClaims are not supported.
|
||||
|
||||
This field enables fine-grained control over resource allocation for the
|
||||
entire pod, allowing resource sharing among containers in a pod.
|
||||
@@ -5891,7 +5601,7 @@ spec:
|
||||
Claims lists the names of resources, defined in spec.resourceClaims,
|
||||
that are used by this container.
|
||||
|
||||
This field depends on the
|
||||
This is an alpha field and requires enabling the
|
||||
DynamicResourceAllocation feature gate.
|
||||
|
||||
This field is immutable. It can only be set for containers.
|
||||
@@ -6416,6 +6126,7 @@ spec:
|
||||
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
|
||||
|
||||
If this value is nil, the behavior is equivalent to the Honor policy.
|
||||
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
|
||||
type: string
|
||||
nodeTaintsPolicy:
|
||||
description: |-
|
||||
@@ -6426,6 +6137,7 @@ spec:
|
||||
- Ignore: node taints are ignored. All nodes are included.
|
||||
|
||||
If this value is nil, the behavior is equivalent to the Ignore policy.
|
||||
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
|
||||
type: string
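A hedged Go sketch (not part of the diff) of the nodeAffinityPolicy/nodeTaintsPolicy fields described above, assuming a k8s.io/api/core/v1 release that defines NodeInclusionPolicy; the label selector is hypothetical:

// Illustrative only: a topology spread constraint that makes the inclusion
// policies explicit instead of relying on the nil defaults described above.
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func spreadConstraint() corev1.TopologySpreadConstraint {
	honor := corev1.NodeInclusionPolicyHonor   // same behavior as leaving nodeAffinityPolicy nil
	ignore := corev1.NodeInclusionPolicyIgnore // same behavior as leaving nodeTaintsPolicy nil
	return corev1.TopologySpreadConstraint{
		MaxSkew:            1,
		TopologyKey:        "kubernetes.io/hostname",
		WhenUnsatisfiable:  corev1.DoNotSchedule,
		LabelSelector:      &metav1.LabelSelector{MatchLabels: map[string]string{"app": "runner"}},
		NodeAffinityPolicy: &honor,
		NodeTaintsPolicy:   &ignore,
	}
}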
topologyKey:
|
||||
description: |-
|
||||
@@ -7131,13 +6843,15 @@ spec:
|
||||
volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
|
||||
If specified, the CSI driver will create or update the volume with the attributes defined
|
||||
in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
|
||||
it can be changed after the claim is created. An empty string or nil value indicates that no
|
||||
VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
|
||||
this field can be reset to its previous value (including nil) to cancel the modification.
|
||||
it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
|
||||
will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
|
||||
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
|
||||
will be set by the persistentvolume controller if it exists.
|
||||
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
|
||||
set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
|
||||
exists.
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
|
||||
(Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
|
||||
type: string
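A hedged Go sketch (not part of the diff) of a claim that opts into a VolumeAttributesClass as described above, assuming a k8s.io/api release that carries the beta field; the class name is hypothetical:

// Illustrative only: a PVC spec referencing a VolumeAttributesClass.
package example

import corev1 "k8s.io/api/core/v1"

func claimSpec() corev1.PersistentVolumeClaimSpec {
	className := "fast-iops" // hypothetical VolumeAttributesClass
	return corev1.PersistentVolumeClaimSpec{
		AccessModes:               []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
		VolumeAttributesClassName: &className,
		// storage request and storageClassName omitted for brevity
	}
}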
volumeMode:
|
||||
description: |-
|
||||
@@ -7311,9 +7025,12 @@ spec:
|
||||
description: |-
|
||||
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
|
||||
Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
|
||||
More info: https://examples.k8s.io/volumes/glusterfs/README.md
|
||||
properties:
|
||||
endpoints:
|
||||
description: endpoints is the endpoint name that details Glusterfs topology.
|
||||
description: |-
|
||||
endpoints is the endpoint name that details Glusterfs topology.
|
||||
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
|
||||
type: string
|
||||
path:
|
||||
description: |-
|
||||
@@ -7367,7 +7084,7 @@ spec:
|
||||
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
|
||||
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
|
||||
The volume will be mounted read-only (ro) and non-executable files (noexec).
|
||||
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
|
||||
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
|
||||
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
|
||||
properties:
|
||||
pullPolicy:
|
||||
@@ -7392,7 +7109,7 @@ spec:
|
||||
description: |-
|
||||
iscsi represents an ISCSI Disk resource that is attached to a
|
||||
kubelet's host machine and then exposed to the pod.
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
|
||||
More info: https://examples.k8s.io/volumes/iscsi/README.md
|
||||
properties:
|
||||
chapAuthDiscovery:
|
||||
description: chapAuthDiscovery defines whether to support iSCSI Discovery CHAP authentication
|
||||
@@ -7782,110 +7499,6 @@ spec:
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
podCertificate:
|
||||
description: |-
|
||||
Projects an auto-rotating credential bundle (private key and certificate
|
||||
chain) that the pod can use either as a TLS client or server.
|
||||
|
||||
Kubelet generates a private key and uses it to send a
|
||||
PodCertificateRequest to the named signer. Once the signer approves the
|
||||
request and issues a certificate chain, Kubelet writes the key and
|
||||
certificate chain to the pod filesystem. The pod does not start until
|
||||
certificates have been issued for each podCertificate projected volume
|
||||
source in its spec.
|
||||
|
||||
Kubelet will begin trying to rotate the certificate at the time indicated
|
||||
by the signer using the PodCertificateRequest.Status.BeginRefreshAt
|
||||
timestamp.
|
||||
|
||||
Kubelet can write a single file, indicated by the credentialBundlePath
|
||||
field, or separate files, indicated by the keyPath and
|
||||
certificateChainPath fields.
|
||||
|
||||
The credential bundle is a single file in PEM format. The first PEM
|
||||
entry is the private key (in PKCS#8 format), and the remaining PEM
|
||||
entries are the certificate chain issued by the signer (typically,
|
||||
signers will return their certificate chain in leaf-to-root order).
|
||||
|
||||
Prefer using the credential bundle format, since your application code
|
||||
can read it atomically. If you use keyPath and certificateChainPath,
|
||||
your application must make two separate file reads. If these coincide
|
||||
with a certificate rotation, it is possible that the private key and leaf
|
||||
certificate you read may not correspond to each other. Your application
|
||||
will need to check for this condition, and re-read until they are
|
||||
consistent.
|
||||
|
||||
The named signer chooses the format of the certificate it
|
||||
issues; consult the signer implementation's documentation to learn how to
|
||||
use the certificates it issues.
|
||||
properties:
|
||||
certificateChainPath:
|
||||
description: |-
|
||||
Write the certificate chain at this path in the projected volume.
|
||||
|
||||
Most applications should use credentialBundlePath. When using keyPath
|
||||
and certificateChainPath, your application needs to check that the key
|
||||
and leaf certificate are consistent, because it is possible to read the
|
||||
files mid-rotation.
|
||||
type: string
|
||||
credentialBundlePath:
|
||||
description: |-
|
||||
Write the credential bundle at this path in the projected volume.
|
||||
|
||||
The credential bundle is a single file that contains multiple PEM blocks.
|
||||
The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
|
||||
key.
|
||||
|
||||
The remaining blocks are CERTIFICATE blocks, containing the issued
|
||||
certificate chain from the signer (leaf and any intermediates).
|
||||
|
||||
Using credentialBundlePath lets your Pod's application code make a single
|
||||
atomic read that retrieves a consistent key and certificate chain. If you
|
||||
project them to separate files, your application code will need to
|
||||
additionally check that the leaf certificate was issued to the key.
|
||||
type: string
|
||||
keyPath:
|
||||
description: |-
|
||||
Write the key at this path in the projected volume.
|
||||
|
||||
Most applications should use credentialBundlePath. When using keyPath
|
||||
and certificateChainPath, your application needs to check that the key
|
||||
and leaf certificate are consistent, because it is possible to read the
|
||||
files mid-rotation.
|
||||
type: string
|
||||
keyType:
|
||||
description: |-
|
||||
The type of keypair Kubelet will generate for the pod.
|
||||
|
||||
Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
|
||||
"ECDSAP521", and "ED25519".
|
||||
type: string
|
||||
maxExpirationSeconds:
|
||||
description: |-
|
||||
maxExpirationSeconds is the maximum lifetime permitted for the
|
||||
certificate.
|
||||
|
||||
Kubelet copies this value verbatim into the PodCertificateRequests it
|
||||
generates for this projection.
|
||||
|
||||
If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver
|
||||
will reject values shorter than 3600 (1 hour). The maximum allowable
|
||||
value is 7862400 (91 days).
|
||||
|
||||
The signer implementation is then free to issue a certificate with any
|
||||
lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
|
||||
seconds (1 hour). This constraint is enforced by kube-apiserver.
|
||||
`kubernetes.io` signers will never issue certificates with a lifetime
|
||||
longer than 24 hours.
|
||||
format: int32
|
||||
type: integer
|
||||
signerName:
|
||||
description: Kubelet's generated CSRs will be addressed to this signer.
|
||||
type: string
|
||||
required:
|
||||
- keyType
|
||||
- signerName
|
||||
type: object
|
||||
secret:
|
||||
description: secret information about the secret data to project
|
||||
properties:
|
||||
@@ -8015,6 +7628,7 @@ spec:
|
||||
description: |-
|
||||
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
|
||||
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
|
||||
More info: https://examples.k8s.io/volumes/rbd/README.md
|
||||
properties:
|
||||
fsType:
|
||||
description: |-
|
||||
@@ -8556,13 +8170,15 @@ spec:
|
||||
volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
|
||||
If specified, the CSI driver will create or update the volume with the attributes defined
|
||||
in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
|
||||
it can be changed after the claim is created. An empty string or nil value indicates that no
|
||||
VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
|
||||
this field can be reset to its previous value (including nil) to cancel the modification.
|
||||
it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
|
||||
will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
|
||||
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
|
||||
will be set by the persistentvolume controller if it exists.
|
||||
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
|
||||
set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
|
||||
exists.
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
|
||||
(Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
|
||||
type: string
|
||||
volumeMode:
|
||||
description: |-
|
||||
@@ -8662,11 +8278,13 @@ spec:
|
||||
description: |-
|
||||
currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
|
||||
When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
|
||||
This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
|
||||
type: string
|
||||
modifyVolumeStatus:
|
||||
description: |-
|
||||
ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
|
||||
When this is unset, there is no ModifyVolume operation being attempted.
|
||||
This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
|
||||
properties:
|
||||
status:
|
||||
description: "status is the status of the ControllerModifyVolume operation. It can be in any of following states:\n - Pending\n Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as\n the specified VolumeAttributesClass not existing.\n - InProgress\n InProgress indicates that the volume is being modified.\n - Infeasible\n Infeasible indicates that the request has been rejected as invalid by the CSI driver. To\n\t resolve the error, a valid VolumeAttributesClass needs to be specified.\nNote: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately."
|
||||
@@ -8737,6 +8355,7 @@ spec:
|
||||
type: object
|
||||
required:
|
||||
- selector
|
||||
- serviceName
|
||||
- template
|
||||
type: object
|
||||
status:
|
||||
@@ -8770,3 +8389,4 @@ spec:
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
preserveUnknownFields: false
|
||||
|
||||
@@ -19,7 +19,6 @@ package actionsgithubcom
import (
"context"
"fmt"
"time"

"github.com/go-logr/logr"
kerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -33,7 +32,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"

v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
"github.com/actions/actions-runner-controller/github/actions"
hash "github.com/actions/actions-runner-controller/hash"
@@ -85,14 +83,14 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
}

log.Info("Deleting resources")
requeue, err := r.cleanupResources(ctx, autoscalingListener, log)
done, err := r.cleanupResources(ctx, autoscalingListener, log)
if err != nil {
log.Error(err, "Failed to cleanup resources after deletion")
return ctrl.Result{}, err
}
if requeue {
if !done {
log.Info("Waiting for resources to be deleted before removing finalizer")
return ctrl.Result{Requeue: true, RequeueAfter: time.Second}, nil
return ctrl.Result{Requeue: true}, nil
}

log.Info("Removing finalizer")
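The hunk above switches cleanupResources between a requeue-style and a done-style return contract. A hedged sketch (not the project's actual code) of how such a deletion path is typically wired with controller-runtime; the finalizer name and cleanup helper are hypothetical, the controllerutil calls are real:

// Sketch only: retry until cleanup reports done, then drop the finalizer.
package example

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const exampleFinalizer = "example.com/cleanup" // hypothetical

func reconcileDeletion(ctx context.Context, c client.Client, pod *corev1.Pod, cleanup func(context.Context) (bool, error)) (ctrl.Result, error) {
	if pod.DeletionTimestamp.IsZero() || !controllerutil.ContainsFinalizer(pod, exampleFinalizer) {
		return ctrl.Result{}, nil
	}
	done, err := cleanup(ctx) // plays the role of r.cleanupResources above
	if err != nil {
		return ctrl.Result{}, err
	}
	if !done {
		// Dependent objects are still terminating; poll again shortly.
		return ctrl.Result{Requeue: true, RequeueAfter: time.Second}, nil
	}
	controllerutil.RemoveFinalizer(pod, exampleFinalizer)
	return ctrl.Result{}, c.Update(ctx, pod)
}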
@@ -130,24 +128,41 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
appConfig, err := r.GetAppConfig(ctx, &autoscalingRunnerSet)
|
||||
if err != nil {
|
||||
log.Error(
|
||||
err,
|
||||
"Failed to get app config for AutoscalingRunnerSet.",
|
||||
"namespace",
|
||||
autoscalingRunnerSet.Namespace,
|
||||
"name",
|
||||
autoscalingRunnerSet.GitHubConfigSecret,
|
||||
)
|
||||
// Check if the GitHub config secret exists
|
||||
secret := new(corev1.Secret)
|
||||
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Spec.GitHubConfigSecret}, secret); err != nil {
|
||||
log.Error(err, "Failed to find GitHub config secret.",
|
||||
"namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
|
||||
"name", autoscalingListener.Spec.GitHubConfigSecret)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// Create a mirror secret in the same namespace as the AutoscalingListener
|
||||
mirrorSecret := new(corev1.Secret)
|
||||
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerSecretMirrorName(autoscalingListener)}, mirrorSecret); err != nil {
|
||||
if !kerrors.IsNotFound(err) {
|
||||
log.Error(err, "Unable to get listener secret mirror", "namespace", autoscalingListener.Namespace, "name", scaleSetListenerSecretMirrorName(autoscalingListener))
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// Create a mirror secret for the listener pod in the Controller namespace for listener pod to use
|
||||
log.Info("Creating a mirror listener secret for the listener pod")
|
||||
return r.createSecretsForListener(ctx, autoscalingListener, secret, log)
|
||||
}
|
||||
|
||||
// make sure the mirror secret is up to date
|
||||
mirrorSecretDataHash := mirrorSecret.Labels["secret-data-hash"]
|
||||
secretDataHash := hash.ComputeTemplateHash(secret.Data)
|
||||
if mirrorSecretDataHash != secretDataHash {
|
||||
log.Info("Updating mirror listener secret for the listener pod", "mirrorSecretDataHash", mirrorSecretDataHash, "secretDataHash", secretDataHash)
|
||||
return r.updateSecretsForListener(ctx, secret, mirrorSecret, log)
|
||||
}
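The block above decides whether the mirror secret is stale by comparing its "secret-data-hash" label against a hash of the source secret's data. A hedged sketch of that check, reusing the repo's hash.ComputeTemplateHash helper referenced in the diff (assumed here to return a string):

// Sketch only: a mirror secret needs a refresh when its recorded hash no longer
// matches the hash of the source secret's data.
package example

import (
	corev1 "k8s.io/api/core/v1"

	hash "github.com/actions/actions-runner-controller/hash"
)

func mirrorNeedsUpdate(source, mirror *corev1.Secret) bool {
	return mirror.Labels["secret-data-hash"] != hash.ComputeTemplateHash(source.Data)
}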
|
||||
// Make sure the runner scale set listener service account is created for the listener pod in the controller namespace
|
||||
serviceAccount := new(corev1.ServiceAccount)
|
||||
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: autoscalingListener.Name}, serviceAccount); err != nil {
|
||||
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerServiceAccountName(autoscalingListener)}, serviceAccount); err != nil {
|
||||
if !kerrors.IsNotFound(err) {
|
||||
log.Error(err, "Unable to get listener service accounts", "namespace", autoscalingListener.Namespace, "name", autoscalingListener.Name)
|
||||
log.Error(err, "Unable to get listener service accounts", "namespace", autoscalingListener.Namespace, "name", scaleSetListenerServiceAccountName(autoscalingListener))
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
@@ -160,9 +175,9 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
|
||||
|
||||
// Make sure the runner scale set listener role is created in the AutoscalingRunnerSet namespace
|
||||
listenerRole := new(rbacv1.Role)
|
||||
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Name}, listenerRole); err != nil {
|
||||
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRole); err != nil {
|
||||
if !kerrors.IsNotFound(err) {
|
||||
log.Error(err, "Unable to get listener role", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", autoscalingListener.Name)
|
||||
log.Error(err, "Unable to get listener role", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", scaleSetListenerRoleName(autoscalingListener))
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
@@ -182,9 +197,9 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
|
||||
|
||||
// Make sure the runner scale set listener role binding is created
|
||||
listenerRoleBinding := new(rbacv1.RoleBinding)
|
||||
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Name}, listenerRoleBinding); err != nil {
|
||||
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRoleBinding); err != nil {
|
||||
if !kerrors.IsNotFound(err) {
|
||||
log.Error(err, "Unable to get listener role binding", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", autoscalingListener.Name)
|
||||
log.Error(err, "Unable to get listener role binding", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", scaleSetListenerRoleName(autoscalingListener))
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
@@ -224,7 +239,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
|
||||
|
||||
// Create a listener pod in the controller namespace
|
||||
log.Info("Creating a listener pod")
|
||||
return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, appConfig, log)
|
||||
return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, mirrorSecret, log)
|
||||
}
|
||||
|
||||
cs := listenerContainerStatus(listenerPod)
|
||||
@@ -245,19 +260,6 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
|
||||
log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// delete the listener config secret as well, so it gets recreated when the listener pod is recreated, with any new data if it exists
|
||||
var configSecret corev1.Secret
|
||||
err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &configSecret)
|
||||
switch {
|
||||
case err == nil && configSecret.DeletionTimestamp.IsZero():
|
||||
log.Info("Deleting the listener config secret")
|
||||
if err := r.Delete(ctx, &configSecret); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to delete listener config secret: %w", err)
|
||||
}
|
||||
case !kerrors.IsNotFound(err):
|
||||
return ctrl.Result{}, fmt.Errorf("failed to get the listener config secret: %w", err)
|
||||
}
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
case cs.State.Running != nil:
|
||||
@@ -273,7 +275,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (requeue bool, err error) {
|
||||
func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) {
|
||||
logger.Info("Cleaning up the listener pod")
|
||||
listenerPod := new(corev1.Pod)
|
||||
err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerPod)
|
||||
@@ -285,7 +287,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
|
||||
return false, fmt.Errorf("failed to delete listener pod: %w", err)
|
||||
}
|
||||
}
|
||||
requeue = true
|
||||
return false, nil
|
||||
case kerrors.IsNotFound(err):
|
||||
_ = r.publishRunningListener(autoscalingListener, false) // If error is returned, we never published metrics so it is safe to ignore
|
||||
default:
|
||||
@@ -303,7 +305,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
|
||||
return false, fmt.Errorf("failed to delete listener config secret: %w", err)
|
||||
}
|
||||
}
|
||||
requeue = true
|
||||
return false, nil
|
||||
case !kerrors.IsNotFound(err):
|
||||
return false, fmt.Errorf("failed to get listener config secret: %w", err)
|
||||
}
|
||||
@@ -320,7 +322,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
|
||||
return false, fmt.Errorf("failed to delete listener proxy secret: %w", err)
|
||||
}
|
||||
}
|
||||
requeue = true
|
||||
return false, nil
|
||||
case !kerrors.IsNotFound(err):
|
||||
return false, fmt.Errorf("failed to get listener proxy secret: %w", err)
|
||||
}
|
||||
@@ -328,7 +330,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
|
||||
}
|
||||
|
||||
listenerRoleBinding := new(rbacv1.RoleBinding)
|
||||
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Name}, listenerRoleBinding)
|
||||
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRoleBinding)
|
||||
switch {
|
||||
case err == nil:
|
||||
if listenerRoleBinding.DeletionTimestamp.IsZero() {
|
||||
@@ -337,14 +339,14 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
|
||||
return false, fmt.Errorf("failed to delete listener role binding: %w", err)
|
||||
}
|
||||
}
|
||||
requeue = true
|
||||
return false, nil
|
||||
case !kerrors.IsNotFound(err):
|
||||
return false, fmt.Errorf("failed to get listener role binding: %w", err)
|
||||
}
|
||||
logger.Info("Listener role binding is deleted")
|
||||
|
||||
listenerRole := new(rbacv1.Role)
|
||||
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Name}, listenerRole)
|
||||
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRole)
|
||||
switch {
|
||||
case err == nil:
|
||||
if listenerRole.DeletionTimestamp.IsZero() {
|
||||
@@ -353,7 +355,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
|
||||
return false, fmt.Errorf("failed to delete listener role: %w", err)
|
||||
}
|
||||
}
|
||||
requeue = true
|
||||
return false, nil
|
||||
case !kerrors.IsNotFound(err):
|
||||
return false, fmt.Errorf("failed to get listener role: %w", err)
|
||||
}
|
||||
@@ -361,7 +363,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
|
||||
|
||||
logger.Info("Cleaning up the listener service account")
|
||||
listenerSa := new(corev1.ServiceAccount)
|
||||
err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerSa)
|
||||
err = r.Get(ctx, types.NamespacedName{Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace}, listenerSa)
|
||||
switch {
|
||||
case err == nil:
|
||||
if listenerSa.DeletionTimestamp.IsZero() {
|
||||
@@ -370,13 +372,13 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
|
||||
return false, fmt.Errorf("failed to delete listener service account: %w", err)
|
||||
}
|
||||
}
|
||||
requeue = true
|
||||
return false, nil
|
||||
case !kerrors.IsNotFound(err):
|
||||
return false, fmt.Errorf("failed to get listener service account: %w", err)
|
||||
}
|
||||
logger.Info("Listener service account is deleted")
|
||||
|
||||
return requeue, nil
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
|
||||
@@ -396,7 +398,7 @@ func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx cont
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, appConfig *appconfig.AppConfig, logger logr.Logger) (ctrl.Result, error) {
|
||||
func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
|
||||
var envs []corev1.EnvVar
|
||||
if autoscalingListener.Spec.Proxy != nil {
|
||||
httpURL := corev1.EnvVar{
|
||||
@@ -465,7 +467,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
|
||||
|
||||
logger.Info("Creating listener config secret")
|
||||
|
||||
podConfig, err := r.newScaleSetListenerConfig(autoscalingListener, appConfig, metricsConfig, cert)
|
||||
podConfig, err := r.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
|
||||
if err != nil {
|
||||
logger.Error(err, "Failed to build listener config secret")
|
||||
return ctrl.Result{}, err
|
||||
@@ -484,7 +486,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
}
|
||||
|
||||
newPod, err := r.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, metricsConfig, envs...)
|
||||
newPod, err := r.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
|
||||
if err != nil {
|
||||
logger.Error(err, "Failed to build listener pod")
|
||||
return ctrl.Result{}, err
|
||||
@@ -543,6 +545,23 @@ func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autosca
|
||||
return certificate, nil
|
||||
}
|
||||
|
||||
func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
|
||||
newListenerSecret := r.newScaleSetListenerSecretMirror(autoscalingListener, secret)
|
||||
|
||||
if err := ctrl.SetControllerReference(autoscalingListener, newListenerSecret, r.Scheme); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
logger.Info("Creating listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name)
|
||||
if err := r.Create(ctx, newListenerSecret); err != nil {
|
||||
logger.Error(err, "Unable to create listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
logger.Info("Created listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name)
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
}
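createSecretsForListener above builds the mirror via r.newScaleSetListenerSecretMirror, whose body is not shown in this diff. A hedged sketch of what such a mirror plausibly looks like, based only on the name helper and the "secret-data-hash" label that appear elsewhere in the diff; the exact shape is an assumption:

// Sketch only: copy the source secret's data into the listener namespace and
// record a hash of it, so later reconciles can detect drift.
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	hash "github.com/actions/actions-runner-controller/hash"
)

func mirrorSecretFor(namespace, name string, source *corev1.Secret) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace, // the listener (controller) namespace
			Name:      name,      // e.g. scaleSetListenerSecretMirrorName(autoscalingListener)
			Labels:    map[string]string{"secret-data-hash": hash.ComputeTemplateHash(source.Data)},
		},
		Data: source.Data, // verbatim copy of the GitHub config secret data
	}
}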
|
||||
func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
|
||||
data, err := autoscalingListener.Spec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) {
|
||||
var secret corev1.Secret
|
||||
@@ -582,6 +601,22 @@ func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, a
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
}
|
||||
|
||||
func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Context, secret *corev1.Secret, mirrorSecret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
|
||||
dataHash := hash.ComputeTemplateHash(secret.Data)
|
||||
updatedMirrorSecret := mirrorSecret.DeepCopy()
|
||||
updatedMirrorSecret.Labels["secret-data-hash"] = dataHash
|
||||
updatedMirrorSecret.Data = secret.Data
|
||||
|
||||
logger.Info("Updating listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name, "hash", dataHash)
|
||||
if err := r.Update(ctx, updatedMirrorSecret); err != nil {
|
||||
logger.Error(err, "Unable to update listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
logger.Info("Updated listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name, "hash", dataHash)
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
}
|
||||
|
||||
func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
|
||||
newRole := r.newScaleSetListenerRole(autoscalingListener)
|
||||
|
||||
|
||||
@@ -14,8 +14,7 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
ghalistenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
|
||||
"github.com/actions/actions-runner-controller/github/actions/fake"
|
||||
listenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
@@ -44,17 +43,10 @@ var _ = Describe("Test AutoScalingListener controller", func() {
|
||||
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
|
||||
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
|
||||
|
||||
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())
|
||||
|
||||
rb := ResourceBuilder{
|
||||
SecretResolver: secretResolver,
|
||||
}
|
||||
|
||||
controller := &AutoscalingListenerReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ResourceBuilder: rb,
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
@@ -142,25 +134,37 @@ var _ = Describe("Test AutoScalingListener controller", func() {
|
||||
autoscalingListenerTestTimeout,
|
||||
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")
|
||||
|
||||
// Check if secret is created
|
||||
mirrorSecret := new(corev1.Secret)
|
||||
Eventually(
|
||||
func() (string, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerSecretMirrorName(autoscalingListener), Namespace: autoscalingListener.Namespace}, mirrorSecret)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(mirrorSecret.Data["github_token"]), nil
|
||||
},
|
||||
autoscalingListenerTestTimeout,
|
||||
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerTestGitHubToken), "Mirror secret should be created")
|
||||
|
||||
// Check if service account is created
|
||||
serviceAccount := new(corev1.ServiceAccount)
|
||||
Eventually(
|
||||
func() (string, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, serviceAccount)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace}, serviceAccount)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return serviceAccount.Name, nil
|
||||
},
|
||||
autoscalingListenerTestTimeout,
|
||||
autoscalingListenerTestInterval,
|
||||
).Should(BeEquivalentTo(autoscalingListener.Name), "Service account should be created")
|
||||
autoscalingListenerTestInterval).Should(BeEquivalentTo(scaleSetListenerServiceAccountName(autoscalingListener)), "Service account should be created")
|
||||
|
||||
// Check if role is created
|
||||
role := new(rbacv1.Role)
|
||||
Eventually(
|
||||
func() ([]rbacv1.PolicyRule, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -174,7 +178,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
|
||||
roleBinding := new(rbacv1.RoleBinding)
|
||||
Eventually(
|
||||
func() (string, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -182,7 +186,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
|
||||
return roleBinding.RoleRef.Name, nil
|
||||
},
|
||||
autoscalingListenerTestTimeout,
|
||||
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListener.Name), "Rolebinding should be created")
|
||||
autoscalingListenerTestInterval).Should(BeEquivalentTo(scaleSetListenerRoleName(autoscalingListener)), "Rolebinding should be created")
|
||||
|
||||
// Check if pod is created
|
||||
pod := new(corev1.Pod)
|
||||
@@ -244,7 +248,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
|
||||
Eventually(
|
||||
func() bool {
|
||||
roleBinding := new(rbacv1.RoleBinding)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding)
|
||||
return kerrors.IsNotFound(err)
|
||||
},
|
||||
autoscalingListenerTestTimeout,
|
||||
@@ -255,7 +259,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
|
||||
Eventually(
|
||||
func() bool {
|
||||
role := new(rbacv1.Role)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
|
||||
return kerrors.IsNotFound(err)
|
||||
},
|
||||
autoscalingListenerTestTimeout,
|
||||
@@ -336,7 +340,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
|
||||
role := new(rbacv1.Role)
|
||||
Eventually(
|
||||
func() ([]rbacv1.PolicyRule, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -347,7 +351,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
|
||||
autoscalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{updated.Spec.EphemeralRunnerSetName})), "Role should be updated")
|
||||
})
|
||||
|
||||
It("It should re-create pod and config secret whenever listener container is terminated", func() {
|
||||
It("It should re-create pod whenever listener container is terminated", func() {
|
||||
// Waiting for the pod is created
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(
|
||||
@@ -363,18 +367,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
|
||||
autoscalingListenerTestInterval,
|
||||
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
|
||||
|
||||
secret := new(corev1.Secret)
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerConfigName(autoscalingListener), Namespace: autoscalingListener.Namespace}, secret)
|
||||
},
|
||||
autoscalingListenerTestTimeout,
|
||||
autoscalingListenerTestInterval,
|
||||
).Should(Succeed(), "Config secret should be created")
|
||||
|
||||
oldPodUID := string(pod.UID)
|
||||
oldSecretUID := string(secret.UID)
|
||||
|
||||
updated := pod.DeepCopy()
|
||||
updated.Status.ContainerStatuses = []corev1.ContainerStatus{
|
||||
{
|
||||
@@ -403,21 +396,75 @@ var _ = Describe("Test AutoScalingListener controller", func() {
|
||||
autoscalingListenerTestTimeout,
|
||||
autoscalingListenerTestInterval,
|
||||
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be re-created")
|
||||
})
|
||||
|
||||
// Check if config secret is re-created
|
||||
It("It should update mirror secrets to match secret used by AutoScalingRunnerSet", func() {
|
||||
// Waiting for the pod is created
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(
|
||||
func() (string, error) {
|
||||
secret := new(corev1.Secret)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerConfigName(autoscalingListener), Namespace: autoscalingListener.Namespace}, secret)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(secret.UID), nil
|
||||
return pod.Name, nil
|
||||
},
|
||||
autoscalingListenerTestTimeout,
|
||||
autoscalingListenerTestInterval,
|
||||
).ShouldNot(BeEquivalentTo(oldSecretUID), "Config secret should be re-created")
|
||||
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
|
||||
|
||||
// Update the secret
|
||||
updatedSecret := configSecret.DeepCopy()
|
||||
updatedSecret.Data["github_token"] = []byte(autoscalingListenerTestGitHubToken + "_updated")
|
||||
err := k8sClient.Update(ctx, updatedSecret)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update test secret")
|
||||
|
||||
updatedPod := pod.DeepCopy()
|
||||
// Ignore status running and consult the container state
|
||||
updatedPod.Status.Phase = corev1.PodRunning
|
||||
updatedPod.Status.ContainerStatuses = []corev1.ContainerStatus{
|
||||
{
|
||||
Name: autoscalingListenerContainerName,
|
||||
State: corev1.ContainerState{
|
||||
Terminated: &corev1.ContainerStateTerminated{
|
||||
ExitCode: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = k8sClient.Status().Update(ctx, updatedPod)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update test pod to failed")
|
||||
|
||||
// Check if mirror secret is updated with right data
|
||||
mirrorSecret := new(corev1.Secret)
|
||||
Eventually(
|
||||
func() (map[string][]byte, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerSecretMirrorName(autoscalingListener), Namespace: autoscalingListener.Namespace}, mirrorSecret)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mirrorSecret.Data, nil
|
||||
},
|
||||
autoscalingListenerTestTimeout,
|
||||
autoscalingListenerTestInterval).Should(BeEquivalentTo(updatedSecret.Data), "Mirror secret should be updated")
|
||||
|
||||
// Check if we re-created a new pod
|
||||
Eventually(
|
||||
func() error {
|
||||
latestPod := new(corev1.Pod)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, latestPod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if latestPod.UID == pod.UID {
|
||||
return fmt.Errorf("Pod should be recreated")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
autoscalingListenerTestTimeout,
|
||||
autoscalingListenerTestInterval).Should(Succeed(), "Pod should be recreated")
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -460,17 +507,10 @@ var _ = Describe("Test AutoScalingListener customization", func() {
|
||||
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
|
||||
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
|
||||
|
||||
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())
|
||||
|
||||
rb := ResourceBuilder{
|
||||
SecretResolver: secretResolver,
|
||||
}
|
||||
|
||||
controller := &AutoscalingListenerReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ResourceBuilder: rb,
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
@@ -740,17 +780,11 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() {
|
||||
ctx = context.Background()
|
||||
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
|
||||
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
|
||||
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())
|
||||
|
||||
rb := ResourceBuilder{
|
||||
SecretResolver: secretResolver,
|
||||
}
|
||||
|
||||
controller := &AutoscalingListenerReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ResourceBuilder: rb,
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
@@ -943,17 +977,10 @@ var _ = Describe("Test AutoScalingListener controller with template modification
|
||||
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
|
||||
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
|
||||
|
||||
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())
|
||||
|
||||
rb := ResourceBuilder{
|
||||
SecretResolver: secretResolver,
|
||||
}
|
||||
|
||||
controller := &AutoscalingListenerReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ResourceBuilder: rb,
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
@@ -1046,12 +1073,6 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
|
||||
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
|
||||
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
|
||||
|
||||
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())
|
||||
|
||||
rb := ResourceBuilder{
|
||||
SecretResolver: secretResolver,
|
||||
}
|
||||
|
||||
cert, err := os.ReadFile(filepath.Join(
|
||||
"../../",
|
||||
"github",
|
||||
@@ -1076,7 +1097,6 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ResourceBuilder: rb,
|
||||
}
|
||||
err = controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
@@ -1091,7 +1111,7 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
|
||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||
GitHubConfigSecret: configSecret.Name,
|
||||
GitHubServerTLS: &v1alpha1.TLSConfig{
|
||||
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
@@ -1127,7 +1147,7 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
|
||||
Spec: v1alpha1.AutoscalingListenerSpec{
|
||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||
GitHubConfigSecret: configSecret.Name,
|
||||
GitHubServerTLS: &v1alpha1.TLSConfig{
|
||||
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
@@ -1171,7 +1191,7 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
|
||||
|
||||
g.Expect(config.Data["config.json"]).ToNot(BeEmpty(), "listener configuration file should not be empty")
|
||||
|
||||
var listenerConfig ghalistenerconfig.Config
|
||||
var listenerConfig listenerconfig.Config
|
||||
err = json.Unmarshal(config.Data["config.json"], &listenerConfig)
|
||||
g.Expect(err).NotTo(HaveOccurred(), "failed to parse listener configuration file")
|
||||
|
||||
|
||||
@@ -151,7 +151,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
if !v1alpha1.IsVersionAllowed(autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], build.Version) {
|
||||
if autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion] != build.Version {
|
||||
if err := r.Delete(ctx, autoscalingRunnerSet); err != nil {
|
||||
log.Error(err, "Failed to delete autoscaling runner set on version mismatch",
|
||||
"buildVersion", build.Version,
|
||||
@@ -207,6 +207,14 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
|
||||
return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log)
|
||||
}
|
||||
|
||||
secret := new(corev1.Secret)
|
||||
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, secret); err != nil {
|
||||
log.Error(err, "Failed to find GitHub config secret.",
|
||||
"namespace", autoscalingRunnerSet.Namespace,
|
||||
"name", autoscalingRunnerSet.Spec.GitHubConfigSecret)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
existingRunnerSets, err := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet)
|
||||
if err != nil {
|
||||
log.Error(err, "Failed to list existing ephemeral runner sets")
|
||||
@@ -394,12 +402,12 @@ func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(
|
||||
|
||||
func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
|
||||
logger.Info("Creating a new runner scale set")
|
||||
actionsClient, err := r.GetActionsService(ctx, autoscalingRunnerSet)
|
||||
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
|
||||
if len(autoscalingRunnerSet.Spec.RunnerScaleSetName) == 0 {
|
||||
autoscalingRunnerSet.Spec.RunnerScaleSetName = autoscalingRunnerSet.Name
|
||||
}
|
||||
if err != nil {
|
||||
logger.Error(err, "Failed to initialize Actions service client for creating a new runner scale set", "error", err.Error())
|
||||
logger.Error(err, "Failed to initialize Actions service client for creating a new runner scale set")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
@@ -490,7 +498,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
actionsClient, err := r.GetActionsService(ctx, autoscalingRunnerSet)
|
||||
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
|
||||
if err != nil {
|
||||
logger.Error(err, "Failed to initialize Actions service client for updating a existing runner scale set")
|
||||
return ctrl.Result{}, err
|
||||
@@ -538,7 +546,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
actionsClient, err := r.GetActionsService(ctx, autoscalingRunnerSet)
|
||||
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
|
||||
if err != nil {
|
||||
logger.Error(err, "Failed to initialize Actions service client for updating a existing runner scale set")
|
||||
return ctrl.Result{}, err
|
||||
@@ -589,7 +597,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
|
||||
return nil
|
||||
}
|
||||
|
||||
actionsClient, err := r.GetActionsService(ctx, autoscalingRunnerSet)
|
||||
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
|
||||
if err != nil {
|
||||
logger.Error(err, "Failed to initialize Actions service client for updating a existing runner scale set")
|
||||
return err
|
||||
@@ -668,6 +676,74 @@ func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Con
|
||||
return &EphemeralRunnerSets{list: list}, nil
|
||||
}
|
||||
|
||||
func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (actions.ActionsService, error) {
	var configSecret corev1.Secret
	if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, &configSecret); err != nil {
		return nil, fmt.Errorf("failed to find GitHub config secret: %w", err)
	}

	opts, err := r.actionsClientOptionsFor(ctx, autoscalingRunnerSet)
	if err != nil {
		return nil, fmt.Errorf("failed to get actions client options: %w", err)
	}

	return r.ActionsClient.GetClientFromSecret(
		ctx,
		autoscalingRunnerSet.Spec.GitHubConfigUrl,
		autoscalingRunnerSet.Namespace,
		configSecret.Data,
		opts...,
	)
}

func (r *AutoscalingRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) ([]actions.ClientOption, error) {
|
||||
var options []actions.ClientOption
|
||||
|
||||
if autoscalingRunnerSet.Spec.Proxy != nil {
|
||||
proxyFunc, err := autoscalingRunnerSet.Spec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
|
||||
var secret corev1.Secret
|
||||
err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: s}, &secret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get proxy secret %s: %w", s, err)
|
||||
}
|
||||
|
||||
return &secret, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get proxy func: %w", err)
|
||||
}
|
||||
|
||||
options = append(options, actions.WithProxy(proxyFunc))
|
||||
}
|
||||
|
||||
tlsConfig := autoscalingRunnerSet.Spec.GitHubServerTLS
|
||||
if tlsConfig != nil {
|
||||
pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) {
|
||||
var configmap corev1.ConfigMap
|
||||
err := r.Get(
|
||||
ctx,
|
||||
types.NamespacedName{
|
||||
Namespace: autoscalingRunnerSet.Namespace,
|
||||
Name: name,
|
||||
},
|
||||
&configmap,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get configmap %s: %w", name, err)
|
||||
}
|
||||
|
||||
return []byte(configmap.Data[key]), nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get tls config: %w", err)
|
||||
}
|
||||
|
||||
options = append(options, actions.WithRootCAs(pool))
|
||||
}
|
||||
|
||||
return options, nil
|
||||
}
|
||||
|
||||
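actionsClientOptionsFor above assembles a slice of actions.ClientOption values (an HTTP proxy function and a root-CA pool) that the Actions client constructor applies later. The real actions package API is not shown in this diff; the following is only an illustrative sketch of the functional-options pattern such a constructor typically follows, with hypothetical names:

package sketch

import (
	"crypto/tls"
	"crypto/x509"
	"net/http"
	"net/url"
)

// clientOption is a hypothetical stand-in for actions.ClientOption.
type clientOption func(*http.Transport)

func withProxy(proxy func(*http.Request) (*url.URL, error)) clientOption {
	return func(t *http.Transport) { t.Proxy = proxy }
}

func withRootCAs(pool *x509.CertPool) clientOption {
	return func(t *http.Transport) { t.TLSClientConfig = &tls.Config{RootCAs: pool} }
}

// newHTTPClient applies the collected options to a transport, roughly
// mirroring how a constructor would consume the opts... slice built above.
func newHTTPClient(opts ...clientOption) *http.Client {
	transport := &http.Transport{}
	for _, opt := range opts {
		opt(transport)
	}
	return &http.Client{Transport: transport}
}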
// SetupWithManager sets up the controller with the Manager.
|
||||
func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
|
||||
@@ -70,12 +70,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
|
||||
Log: logf.Log,
|
||||
ControllerNamespace: autoscalingNS.Name,
|
||||
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
|
||||
ResourceBuilder: ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: k8sClient,
|
||||
multiClient: fake.NewMultiClient(),
|
||||
},
|
||||
},
|
||||
ActionsClient: fake.NewMultiClient(),
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
@@ -682,7 +677,13 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
|
||||
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
|
||||
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
|
||||
|
||||
multiClient := fake.NewMultiClient(
|
||||
controller := &AutoscalingRunnerSetReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ControllerNamespace: autoscalingNS.Name,
|
||||
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
|
||||
ActionsClient: fake.NewMultiClient(
|
||||
fake.WithDefaultClient(
|
||||
fake.NewFakeClient(
|
||||
fake.WithUpdateRunnerScaleSet(
|
||||
@@ -702,20 +703,7 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
|
||||
),
|
||||
nil,
|
||||
),
|
||||
)
|
||||
|
||||
controller := &AutoscalingRunnerSetReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ControllerNamespace: autoscalingNS.Name,
|
||||
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
|
||||
ResourceBuilder: ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: k8sClient,
|
||||
multiClient: multiClient,
|
||||
},
|
||||
},
|
||||
),
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
@@ -830,12 +818,7 @@ var _ = Describe("Test AutoscalingController creation failures", Ordered, func()
|
||||
Log: logf.Log,
|
||||
ControllerNamespace: autoscalingNS.Name,
|
||||
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
|
||||
ResourceBuilder: ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: k8sClient,
|
||||
multiClient: fake.NewMultiClient(),
|
||||
},
|
||||
},
|
||||
ActionsClient: fake.NewMultiClient(),
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
@@ -954,19 +937,14 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
|
||||
ctx = context.Background()
|
||||
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
|
||||
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
|
||||
multiClient := actions.NewMultiClient(logr.Discard())
|
||||
|
||||
controller = &AutoscalingRunnerSetReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ControllerNamespace: autoscalingNS.Name,
|
||||
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
|
||||
ResourceBuilder: ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: k8sClient,
|
||||
multiClient: multiClient,
|
||||
},
|
||||
},
|
||||
ActionsClient: actions.NewMultiClient(logr.Discard()),
|
||||
}
|
||||
|
||||
err := controller.SetupWithManager(mgr)
|
||||
@@ -1149,12 +1127,7 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
|
||||
Log: logf.Log,
|
||||
ControllerNamespace: autoscalingNS.Name,
|
||||
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
|
||||
ResourceBuilder: ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: k8sClient,
|
||||
multiClient: fake.NewMultiClient(),
|
||||
},
|
||||
},
|
||||
ActionsClient: fake.NewMultiClient(),
|
||||
}
|
||||
err = controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
@@ -1163,10 +1136,7 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
|
||||
})
|
||||
|
||||
It("should be able to make requests to a server using root CAs", func() {
|
||||
controller.SecretResolver = &SecretResolver{
|
||||
k8sClient: k8sClient,
|
||||
multiClient: actions.NewMultiClient(logr.Discard()),
|
||||
}
|
||||
controller.ActionsClient = actions.NewMultiClient(logr.Discard())
|
||||
|
||||
certsFolder := filepath.Join(
|
||||
"../../",
|
||||
@@ -1201,7 +1171,7 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
|
||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||
GitHubConfigUrl: server.ConfigURLForOrg("my-org"),
|
||||
GitHubConfigSecret: configSecret.Name,
|
||||
GitHubServerTLS: &v1alpha1.TLSConfig{
|
||||
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
@@ -1254,7 +1224,7 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
|
||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||
GitHubConfigSecret: configSecret.Name,
|
||||
GitHubServerTLS: &v1alpha1.TLSConfig{
|
||||
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
@@ -1318,7 +1288,7 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
|
||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||
GitHubConfigSecret: configSecret.Name,
|
||||
GitHubServerTLS: &v1alpha1.TLSConfig{
|
||||
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
@@ -1391,12 +1361,7 @@ var _ = Describe("Test external permissions cleanup", Ordered, func() {
|
||||
Log: logf.Log,
|
||||
ControllerNamespace: autoscalingNS.Name,
|
||||
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
|
||||
ResourceBuilder: ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: k8sClient,
|
||||
multiClient: fake.NewMultiClient(),
|
||||
},
|
||||
},
|
||||
ActionsClient: fake.NewMultiClient(),
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
@@ -1554,12 +1519,7 @@ var _ = Describe("Test external permissions cleanup", Ordered, func() {
|
||||
Log: logf.Log,
|
||||
ControllerNamespace: autoscalingNS.Name,
|
||||
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
|
||||
ResourceBuilder: ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: k8sClient,
|
||||
multiClient: fake.NewMultiClient(),
|
||||
},
|
||||
},
|
||||
ActionsClient: fake.NewMultiClient(),
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
@@ -1767,12 +1727,7 @@ var _ = Describe("Test resource version and build version mismatch", func() {
|
||||
Log: logf.Log,
|
||||
ControllerNamespace: autoscalingNS.Name,
|
||||
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
|
||||
ResourceBuilder: ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: k8sClient,
|
||||
multiClient: fake.NewMultiClient(),
|
||||
},
|
||||
},
|
||||
ActionsClient: fake.NewMultiClient(),
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
|
||||
@@ -21,8 +21,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
||||
@@ -30,7 +28,6 @@ import (
|
||||
"github.com/go-logr/logr"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
@@ -49,22 +46,10 @@ type EphemeralRunnerReconciler struct {
	client.Client
	Log           logr.Logger
	Scheme        *runtime.Scheme
	ActionsClient actions.MultiClient
	ResourceBuilder
}

// precompute backoff durations for failed ephemeral runners
// the len(failedRunnerBackoff) must be equal to maxFailures + 1
var failedRunnerBackoff = []time.Duration{
	0,
	5 * time.Second,
	10 * time.Second,
	20 * time.Second,
	40 * time.Second,
	80 * time.Second,
}

const maxFailures = 5

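The failure count recorded in .Status.Failures indexes directly into failedRunnerBackoff, which is why the comment above requires the slice to hold maxFailures + 1 entries: counts 0 through maxFailures must all be valid indexes. A minimal sketch of that relationship (the clamp is only for the sketch; the controller deletes the runner once the count exceeds maxFailures):

// backoffFor returns the delay applied after the given number of failures.
func backoffFor(failureCount int) time.Duration {
	if failureCount > maxFailures {
		failureCount = maxFailures // illustrative clamp, not in the controller
	}
	return failedRunnerBackoff[failureCount]
}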
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/finalizers,verbs=get;list;watch;create;update;patch;delete
|
||||
@@ -181,136 +166,58 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
if ephemeralRunner.Status.RunnerId == 0 {
|
||||
log.Info("Creating new ephemeral runner registration and updating status with runner config")
|
||||
if r, err := r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log); r != nil {
|
||||
return *r, err
|
||||
}
|
||||
}
|
||||
|
||||
secret := new(corev1.Secret)
|
||||
if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
|
||||
if !kerrors.IsNotFound(err) {
|
||||
log.Error(err, "Failed to fetch secret")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
jitConfig, err := r.createRunnerJitConfig(ctx, ephemeralRunner, log)
|
||||
switch {
|
||||
case err == nil:
|
||||
// create secret if not created
|
||||
log.Info("Creating new ephemeral runner secret for jitconfig.")
|
||||
jitSecret, err := r.createSecret(ctx, ephemeralRunner, jitConfig, log)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to create secret: %w", err)
|
||||
if r, err := r.createSecret(ctx, ephemeralRunner, log); r != nil {
|
||||
return *r, err
|
||||
}
|
||||
log.Info("Created new ephemeral runner secret for jitconfig.")
|
||||
secret = jitSecret
|
||||
|
||||
case errors.Is(err, retryableError):
|
||||
log.Info("Encountered retryable error, requeueing", "error", err.Error())
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
case errors.Is(err, fatalError):
|
||||
log.Info("JIT config cannot be created for this ephemeral runner, issuing delete", "error", err.Error())
|
||||
if err := r.Delete(ctx, ephemeralRunner); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to delete the ephemeral runner: %w", err)
|
||||
}
|
||||
log.Info("Request to delete ephemeral runner has been issued")
|
||||
return ctrl.Result{}, nil
|
||||
default:
|
||||
log.Error(err, "Failed to create ephemeral runners secret", "error", err.Error())
|
||||
// Retry to get the secret that was just created.
|
||||
// Otherwise, even though we want to continue to create the pod,
|
||||
// it fails due to the missing secret resulting in an invalid pod spec.
|
||||
if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
|
||||
log.Error(err, "Failed to fetch secret")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if ephemeralRunner.Status.RunnerId == 0 {
|
||||
log.Info("Updating ephemeral runner status with runnerId and runnerName")
|
||||
runnerID, err := strconv.Atoi(string(secret.Data["runnerId"]))
|
||||
if err != nil {
|
||||
log.Error(err, "Runner config secret is corrupted: missing runnerId")
|
||||
log.Info("Deleting corrupted runner config secret")
|
||||
if err := r.Delete(ctx, secret); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to delete the corrupted runner config secret")
|
||||
}
|
||||
log.Info("Corrupted runner config secret has been deleted")
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
}
|
||||
|
||||
runnerName := string(secret.Data["runnerName"])
|
||||
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
|
||||
obj.Status.RunnerId = runnerID
|
||||
obj.Status.RunnerName = runnerName
|
||||
}); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %w", err)
|
||||
}
|
||||
ephemeralRunner.Status.RunnerId = runnerID
|
||||
ephemeralRunner.Status.RunnerName = runnerName
|
||||
log.Info("Updated ephemeral runner status with runnerId and runnerName")
|
||||
}
|
||||
|
||||
if len(ephemeralRunner.Status.Failures) > maxFailures {
	log.Info(fmt.Sprintf("EphemeralRunner has failed more than %d times. Deleting ephemeral runner so it can be re-created", maxFailures))
	if err := r.Delete(ctx, ephemeralRunner); err != nil {
		log.Error(fmt.Errorf("failed to delete ephemeral runner after %d failures: %w", maxFailures, err), "Failed to delete ephemeral runner")
		return ctrl.Result{}, err
	}

	return ctrl.Result{}, nil
}

now := metav1.Now()
lastFailure := ephemeralRunner.Status.LastFailure()
backoffDuration := failedRunnerBackoff[len(ephemeralRunner.Status.Failures)]
nextReconciliation := lastFailure.Add(backoffDuration)
if !lastFailure.IsZero() && now.Before(&metav1.Time{Time: nextReconciliation}) {
	requeueAfter := nextReconciliation.Sub(now.Time)
	log.Info("Backing off the next reconciliation due to failure",
		"lastFailure", lastFailure,
		"nextReconciliation", nextReconciliation,
		"requeueAfter", requeueAfter,
	)
	return ctrl.Result{
		Requeue:      true,
		RequeueAfter: requeueAfter,
	}, nil
}

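The requeue arithmetic above is just the unexpired remainder of the backoff window, measured from the most recent failure. A hedged restatement using plain time.Time values instead of metav1.Time:

// remainingBackoff returns how long reconciliation should still wait after
// the last failure, or zero once the backoff window has elapsed.
func remainingBackoff(lastFailure time.Time, failures int, now time.Time) time.Duration {
	next := lastFailure.Add(failedRunnerBackoff[failures])
	if now.Before(next) {
		return next.Sub(now)
	}
	return 0
}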
pod := new(corev1.Pod)
|
||||
if err := r.Get(ctx, req.NamespacedName, pod); err != nil {
|
||||
if !kerrors.IsNotFound(err) {
|
||||
switch {
|
||||
case !kerrors.IsNotFound(err):
|
||||
log.Error(err, "Failed to fetch the pod")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
log.Info("Ephemeral runner pod does not exist. Creating new ephemeral runner")
|
||||
|
||||
result, err := r.createPod(ctx, ephemeralRunner, secret, log)
|
||||
switch {
|
||||
case err == nil:
|
||||
return result, nil
|
||||
case kerrors.IsAlreadyExists(err):
|
||||
log.Info("Runner pod already exists. Waiting for the pod event to be received")
|
||||
return ctrl.Result{Requeue: true, RequeueAfter: 5 * time.Second}, nil
|
||||
case kerrors.IsInvalid(err):
|
||||
log.Error(err, "Failed to create a pod due to unrecoverable failure")
|
||||
errMessage := fmt.Sprintf("Failed to create the pod: %v", err)
|
||||
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonInvalidPodFailure, log); err != nil {
|
||||
case len(ephemeralRunner.Status.Failures) > 5:
|
||||
log.Info("EphemeralRunner has failed more than 5 times. Marking it as failed")
|
||||
errMessage := fmt.Sprintf("Pod has failed to start more than 5 times: %s", pod.Status.Message)
|
||||
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonTooManyPodFailures, log); err != nil {
|
||||
log.Error(err, "Failed to set ephemeral runner to phase Failed")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
case kerrors.IsForbidden(err):
|
||||
if status, ok := err.(kerrors.APIStatus); ok || errors.As(err, &status) {
|
||||
isResourceQuotaExceeded := strings.Contains(status.Status().Message, "exceeded quota:")
|
||||
isAboutToExpire := ephemeralRunner.CreationTimestamp.Time.Add(10 * time.Minute).Before(time.Now())
|
||||
switch {
|
||||
case isResourceQuotaExceeded && isAboutToExpire:
|
||||
log.Error(err, "Failed to create a pod due to resource quota exceeded and the ephemeral runner is about to expire; re-creating the ephemeral runner")
|
||||
if err := r.Delete(ctx, ephemeralRunner); err != nil {
|
||||
log.Error(err, "Failed to delete the ephemeral runner")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
case isResourceQuotaExceeded:
|
||||
log.Error(err, "Resource quota is exceeded; requeue in 30s to retry pod creation")
|
||||
return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
|
||||
|
||||
default:
|
||||
// other forbidden errors
|
||||
// fallthrough to the default handling below
|
||||
}
|
||||
}
|
||||
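The Forbidden branch above separates a namespace resource-quota rejection from other authorization failures by inspecting the API status message. A hedged helper condensing that check, using the same k8s.io/apimachinery calls as the code above:

// isQuotaExceeded reports whether err is a Forbidden response caused by an
// exceeded resource quota rather than by missing permissions.
func isQuotaExceeded(err error) bool {
	if !kerrors.IsForbidden(err) {
		return false
	}
	var status kerrors.APIStatus
	if !errors.As(err, &status) {
		return false
	}
	return strings.Contains(status.Status().Message, "exceeded quota:")
}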
// Pod was not found. Create if the pod has never been created
|
||||
log.Info("Creating new EphemeralRunner pod.")
|
||||
result, err := r.createPod(ctx, ephemeralRunner, secret, log)
|
||||
switch {
|
||||
case err == nil:
|
||||
return result, nil
|
||||
case kerrors.IsInvalid(err) || kerrors.IsForbidden(err):
|
||||
log.Error(err, "Failed to create a pod due to unrecoverable failure")
|
||||
errMessage := fmt.Sprintf("Failed to create the pod: %v", err)
|
||||
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonInvalidPodFailure, log); err != nil {
|
||||
@@ -323,6 +230,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cs := runnerContainerStatus(pod)
|
||||
switch {
|
||||
@@ -354,41 +262,34 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
|
||||
|
||||
case cs.State.Terminated.ExitCode != 0: // failed
|
||||
log.Info("Ephemeral runner container failed", "exitCode", cs.State.Terminated.ExitCode)
|
||||
if ephemeralRunner.HasJob() {
|
||||
log.Error(
|
||||
errors.New("ephemeral runner has a job assigned, but the pod has failed"),
|
||||
"Ephemeral runner either has faulty entrypoint or something external killing the runner",
|
||||
)
|
||||
log.Info("Deleting the ephemeral runner that has a job assigned but the pod has failed")
|
||||
if err := r.Delete(ctx, ephemeralRunner); err != nil {
|
||||
log.Error(err, "Failed to delete the ephemeral runner that has a job assigned but the pod has failed")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
log.Info("Deleted the ephemeral runner that has a job assigned but the pod has failed")
|
||||
log.Info("Trying to remove the runner from the service")
|
||||
actionsClient, err := r.GetActionsService(ctx, ephemeralRunner)
|
||||
if err != nil {
|
||||
log.Error(err, "Failed to get actions client for removing the runner from the service")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
|
||||
log.Error(err, "Failed to remove the runner from the service")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
log.Info("Removed the runner from the service")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
|
||||
log.Error(err, "Failed to delete runner pod on failure")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
|
||||
default: // succeeded
|
||||
log.Info("Ephemeral runner has finished successfully, deleting ephemeral runner", "exitCode", cs.State.Terminated.ExitCode)
|
||||
if err := r.Delete(ctx, ephemeralRunner); err != nil {
|
||||
log.Error(err, "Failed to delete ephemeral runner after successful completion")
|
||||
default:
|
||||
// pod succeeded. We double-check with the service if the runner exists.
|
||||
// The reason is that image can potentially finish with status 0, but not pick up the job.
|
||||
existsInService, err := r.runnerRegisteredWithService(ctx, ephemeralRunner.DeepCopy(), log)
|
||||
if err != nil {
|
||||
log.Error(err, "Failed to check if runner is registered with the service")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
if !existsInService {
|
||||
// the runner does not exist in the service, so it must be done
|
||||
log.Info("Ephemeral runner has finished since it does not exist in the service anymore")
|
||||
if err := r.markAsFinished(ctx, ephemeralRunner, log); err != nil {
|
||||
log.Error(err, "Failed to mark ephemeral runner as finished")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// The runner still exists. This can happen if the pod exited with 0 but fails to start
|
||||
log.Info("Ephemeral runner pod has finished, but the runner still exists in the service. Deleting the pod to restart it.")
|
||||
if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
|
||||
log.Error(err, "failed to delete a pod that still exists in the service")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
@@ -558,6 +459,18 @@ func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralR
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
	log.Info("Updating ephemeral runner status to Finished")
	if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
		obj.Status.Phase = corev1.PodSucceeded
	}); err != nil {
		return fmt.Errorf("failed to update ephemeral runner with status finished: %w", err)
	}

	log.Info("EphemeralRunner status is marked as Finished")
	return nil
}
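markAsFinished patches only the status subresource through the repo's patchSubResource helper. A hedged example of the same pattern, as it would appear inside another reconciler method, applied to status fields this controller already uses (the mutation itself is illustrative, not a call the controller makes):

if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
	obj.Status.Ready = false
	obj.Status.Reason = "Example"
	obj.Status.Message = "illustrative status patch"
}); err != nil {
	return fmt.Errorf("failed to patch ephemeral runner status: %w", err)
}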
|
||||
// deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count.
|
||||
// It should not be responsible for setting the status to Failed.
|
||||
func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
|
||||
@@ -571,9 +484,9 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
|
||||
log.Info("Updating ephemeral runner status to track the failure count")
|
||||
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
|
||||
if obj.Status.Failures == nil {
|
||||
obj.Status.Failures = make(map[string]metav1.Time)
|
||||
obj.Status.Failures = make(map[string]bool)
|
||||
}
|
||||
obj.Status.Failures[string(pod.UID)] = metav1.Now()
|
||||
obj.Status.Failures[string(pod.UID)] = true
|
||||
obj.Status.Ready = false
|
||||
obj.Status.Reason = pod.Status.Reason
|
||||
obj.Status.Message = pod.Status.Message
|
||||
@@ -585,12 +498,14 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) createRunnerJitConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (*actions.RunnerScaleSetJitRunnerConfig, error) {
|
||||
// updateStatusWithRunnerConfig fetches runtime configuration needed by the runner
|
||||
// This method should always set .status.runnerId and .status.runnerJITConfig
|
||||
func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
|
||||
// Runner is not registered with the service. We need to register it first
|
||||
log.Info("Creating ephemeral runner JIT config")
|
||||
actionsClient, err := r.GetActionsService(ctx, ephemeralRunner)
|
||||
actionsClient, err := r.actionsClientFor(ctx, ephemeralRunner)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get actions client for generating JIT config: %w", err)
|
||||
return &ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %w", err)
|
||||
}
|
||||
|
||||
jitSettings := &actions.RunnerScaleSetJitRunnerSetting{
|
||||
@@ -605,19 +520,15 @@ func (r *EphemeralRunnerReconciler) createRunnerJitConfig(ctx context.Context, e
|
||||
}
|
||||
|
||||
jitConfig, err := actionsClient.GenerateJitRunnerConfig(ctx, jitSettings, ephemeralRunner.Spec.RunnerScaleSetId)
|
||||
if err == nil { // if NO error
|
||||
log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id)
|
||||
return jitConfig, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
actionsError := &actions.ActionsError{}
|
||||
if !errors.As(err, &actionsError) {
|
||||
return nil, fmt.Errorf("failed to generate JIT config with generic error: %w", err)
|
||||
return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with generic error: %w", err)
|
||||
}
|
||||
|
||||
if actionsError.StatusCode != http.StatusConflict ||
|
||||
!actionsError.IsException("AgentExistsException") {
|
||||
return nil, fmt.Errorf("failed to generate JIT config with Actions service error: %w", err)
|
||||
return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %w", err)
|
||||
}
|
||||
|
||||
// If the runner with the name we want already exists it means:
|
||||
@@ -630,12 +541,12 @@ func (r *EphemeralRunnerReconciler) createRunnerJitConfig(ctx context.Context, e
|
||||
log.Info("Getting runner jit config failed with conflict error, trying to get the runner by name", "runnerName", ephemeralRunner.Name)
|
||||
existingRunner, err := actionsClient.GetRunnerByName(ctx, ephemeralRunner.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get runner by name: %w", err)
|
||||
return &ctrl.Result{}, fmt.Errorf("failed to get runner by name: %w", err)
|
||||
}
|
||||
|
||||
if existingRunner == nil {
|
||||
log.Info("Runner with the same name does not exist anymore, re-queuing the reconciliation")
|
||||
return nil, fmt.Errorf("%w: runner existed, retry configuration", retryableError)
|
||||
log.Info("Runner with the same name does not exist, re-queuing the reconciliation")
|
||||
return &ctrl.Result{Requeue: true}, nil
|
||||
}
|
||||
|
||||
log.Info("Found the runner with the same name", "runnerId", existingRunner.Id, "runnerScaleSetId", existingRunner.RunnerScaleSetId)
|
||||
@@ -643,14 +554,40 @@ func (r *EphemeralRunnerReconciler) createRunnerJitConfig(ctx context.Context, e
|
||||
log.Info("Removing the runner with the same name")
|
||||
err := actionsClient.RemoveRunner(ctx, int64(existingRunner.Id))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to remove runner from the service: %w", err)
|
||||
return &ctrl.Result{}, fmt.Errorf("failed to remove runner from the service: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Removed the runner with the same name, re-queuing the reconciliation")
|
||||
return nil, fmt.Errorf("%w: runner existed belonging to the scale set, retry configuration", retryableError)
|
||||
return &ctrl.Result{Requeue: true}, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("%w: runner with the same name but doesn't belong to this RunnerScaleSet: %w", fatalError, err)
|
||||
// TODO: Do we want to mark the ephemeral runner as failed, and let EphemeralRunnerSet to clean it up, so we can recover from this situation?
|
||||
// The situation is that the EphemeralRunner's name is already used by something else to register a runner, and we can't take the control back.
|
||||
return &ctrl.Result{}, fmt.Errorf("runner with the same name but doesn't belong to this RunnerScaleSet: %w", err)
|
||||
}
|
||||
log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id)
|
||||
|
||||
log.Info("Updating ephemeral runner status with runnerId and runnerJITConfig")
|
||||
err = patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
|
||||
obj.Status.RunnerId = jitConfig.Runner.Id
|
||||
obj.Status.RunnerName = jitConfig.Runner.Name
|
||||
obj.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
|
||||
})
|
||||
if err != nil {
|
||||
return &ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %w", err)
|
||||
}
|
||||
|
||||
// We want to continue without a requeue for faster pod creation.
|
||||
//
|
||||
// To do so, we update the status in-place, so that both continuing the loop and
|
||||
// and requeuing and skipping updateStatusWithRunnerConfig in the next loop, will
|
||||
// have the same effect.
|
||||
ephemeralRunner.Status.RunnerId = jitConfig.Runner.Id
|
||||
ephemeralRunner.Status.RunnerName = jitConfig.Runner.Name
|
||||
ephemeralRunner.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
|
||||
|
||||
log.Info("Updated ephemeral runner status with runnerId and runnerJITConfig")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, log logr.Logger) (ctrl.Result, error) {
|
||||
@@ -726,21 +663,21 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, jitConfig *actions.RunnerScaleSetJitRunnerConfig, log logr.Logger) (*corev1.Secret, error) {
|
||||
func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
|
||||
log.Info("Creating new secret for ephemeral runner")
|
||||
jitSecret := r.newEphemeralRunnerJitSecret(runner, jitConfig)
|
||||
jitSecret := r.newEphemeralRunnerJitSecret(runner)
|
||||
|
||||
if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil {
|
||||
return nil, fmt.Errorf("failed to set controller reference: %w", err)
|
||||
return &ctrl.Result{}, fmt.Errorf("failed to set controller reference: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Created new secret spec for ephemeral runner")
|
||||
if err := r.Create(ctx, jitSecret); err != nil {
|
||||
return nil, fmt.Errorf("failed to create jit secret: %w", err)
|
||||
return &ctrl.Result{}, fmt.Errorf("failed to create jit secret: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Created ephemeral runner secret", "secretName", jitSecret.Name)
|
||||
return jitSecret, nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// updateRunStatusFromPod is responsible for updating non-exiting statuses.
|
||||
@@ -790,8 +727,104 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) (actions.ActionsService, error) {
|
||||
secret := new(corev1.Secret)
|
||||
if err := r.Get(ctx, types.NamespacedName{Namespace: runner.Namespace, Name: runner.Spec.GitHubConfigSecret}, secret); err != nil {
|
||||
return nil, fmt.Errorf("failed to get secret: %w", err)
|
||||
}
|
||||
|
||||
opts, err := r.actionsClientOptionsFor(ctx, runner)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get actions client options: %w", err)
|
||||
}
|
||||
|
||||
return r.ActionsClient.GetClientFromSecret(
|
||||
ctx,
|
||||
runner.Spec.GitHubConfigUrl,
|
||||
runner.Namespace,
|
||||
secret.Data,
|
||||
opts...,
|
||||
)
|
||||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) actionsClientOptionsFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) ([]actions.ClientOption, error) {
|
||||
var opts []actions.ClientOption
|
||||
if runner.Spec.Proxy != nil {
|
||||
proxyFunc, err := runner.Spec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
|
||||
var secret corev1.Secret
|
||||
err := r.Get(ctx, types.NamespacedName{Namespace: runner.Namespace, Name: s}, &secret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get proxy secret %s: %w", s, err)
|
||||
}
|
||||
|
||||
return &secret, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get proxy func: %w", err)
|
||||
}
|
||||
|
||||
opts = append(opts, actions.WithProxy(proxyFunc))
|
||||
}
|
||||
|
||||
tlsConfig := runner.Spec.GitHubServerTLS
|
||||
if tlsConfig != nil {
|
||||
pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) {
|
||||
var configmap corev1.ConfigMap
|
||||
err := r.Get(
|
||||
ctx,
|
||||
types.NamespacedName{
|
||||
Namespace: runner.Namespace,
|
||||
Name: name,
|
||||
},
|
||||
&configmap,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get configmap %s: %w", name, err)
|
||||
}
|
||||
|
||||
return []byte(configmap.Data[key]), nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get tls config: %w", err)
|
||||
}
|
||||
|
||||
opts = append(opts, actions.WithRootCAs(pool))
|
||||
}
|
||||
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
// runnerRegisteredWithService checks if the runner is still registered with the service
|
||||
// Returns found=false and err=nil if ephemeral runner does not exist in GitHub service and should be deleted
|
||||
func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (found bool, err error) {
|
||||
actionsClient, err := r.actionsClientFor(ctx, runner)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to get Actions client for ScaleSet: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Checking if runner exists in GitHub service", "runnerId", runner.Status.RunnerId)
|
||||
_, err = actionsClient.GetRunner(ctx, int64(runner.Status.RunnerId))
|
||||
if err != nil {
|
||||
actionsError := &actions.ActionsError{}
|
||||
if !errors.As(err, &actionsError) {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if actionsError.StatusCode != http.StatusNotFound ||
|
||||
!actionsError.IsException("AgentNotFoundException") {
|
||||
return false, fmt.Errorf("failed to check if runner exists in GitHub service: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Runner does not exist in GitHub service", "runnerId", runner.Status.RunnerId)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
log.Info("Runner exists in GitHub service", "runnerId", runner.Status.RunnerId)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
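runnerRegisteredWithService treats exactly one error shape as "the runner is gone": an actions.ActionsError carrying HTTP 404 and the AgentNotFoundException code. A hedged helper expressing the same match with the errors.As pattern used above:

// isRunnerGone reports whether the Actions service said the agent no longer exists.
func isRunnerGone(err error) bool {
	actionsError := &actions.ActionsError{}
	if !errors.As(err, &actionsError) {
		return false
	}
	return actionsError.StatusCode == http.StatusNotFound &&
		actionsError.IsException("AgentNotFoundException")
}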
func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
|
||||
client, err := r.GetActionsService(ctx, ephemeralRunner)
|
||||
client, err := r.actionsClientFor(ctx, ephemeralRunner)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get actions client for runner: %w", err)
|
||||
}
|
||||
|
||||
@@ -30,7 +30,7 @@ import (

const (
ephemeralRunnerTimeout = time.Second * 20
ephemeralRunnerInterval = time.Millisecond * 10
ephemeralRunnerInterval = time.Millisecond * 250
runnerImage = "ghcr.io/actions/actions-runner:latest"
)

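These tests pass the timeout and polling interval to Eventually both positionally and through the fluent WithTimeout/WithPolling form; the two are equivalent in Gomega. A hedged fragment for inside a Ginkgo It block, where fetch is an assumed func() error:

Eventually(fetch, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(Succeed())

Eventually(fetch).
	WithTimeout(ephemeralRunnerTimeout).
	WithPolling(ephemeralRunnerInterval).
	Should(Succeed())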
@@ -110,12 +110,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ResourceBuilder: ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: mgr.GetClient(),
|
||||
multiClient: fake.NewMultiClient(),
|
||||
},
|
||||
},
|
||||
ActionsClient: fake.NewMultiClient(),
|
||||
}
|
||||
|
||||
err := controller.SetupWithManager(mgr)
|
||||
@@ -176,7 +171,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
).Should(BeEquivalentTo(ephemeralRunner.Name))
|
||||
})
|
||||
|
||||
It("It should re-create pod on failure and no job assigned", func() {
|
||||
It("It should re-create pod on failure", func() {
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(func() (bool, error) {
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
@@ -200,67 +195,6 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
).Should(BeEquivalentTo(true))
|
||||
})
|
||||
|
||||
It("It should delete ephemeral runner on failure and job assigned", func() {
|
||||
er := new(v1alpha1.EphemeralRunner)
|
||||
// Check if finalizer is added
|
||||
Eventually(
|
||||
func() error {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
|
||||
return err
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(Succeed(), "failed to get ephemeral runner")
|
||||
|
||||
// update job id to simulate job assigned
|
||||
er.Status.JobID = "1"
|
||||
err := k8sClient.Status().Update(ctx, er)
|
||||
Expect(err).To(BeNil(), "failed to update ephemeral runner status")
|
||||
|
||||
er = new(v1alpha1.EphemeralRunner)
|
||||
Eventually(
|
||||
func() (string, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return er.Status.JobID, nil
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo("1"))
|
||||
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(func() (bool, error) {
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}).Should(BeEquivalentTo(true))
|
||||
|
||||
// delete pod to simulate failure
|
||||
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
||||
Name: v1alpha1.EphemeralRunnerContainerName,
|
||||
State: corev1.ContainerState{
|
||||
Terminated: &corev1.ContainerStateTerminated{
|
||||
ExitCode: 1,
|
||||
},
|
||||
},
|
||||
})
|
||||
err = k8sClient.Status().Update(ctx, pod)
|
||||
Expect(err).To(BeNil(), "Failed to update pod status")
|
||||
|
||||
er = new(v1alpha1.EphemeralRunner)
|
||||
Eventually(
|
||||
func() bool {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
|
||||
return kerrors.IsNotFound(err)
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
|
||||
})
|
||||
|
||||
It("It should failed if a pod template is invalid", func() {
|
||||
invalideEphemeralRunner := newExampleRunner("invalid-ephemeral-runner", autoscalingNS.Name, configSecret.Name)
|
||||
invalideEphemeralRunner.Spec.Spec.PriorityClassName = "notexist"
|
||||
@@ -269,22 +203,13 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
updated := new(v1alpha1.EphemeralRunner)
|
||||
Eventually(
|
||||
func() (corev1.PodPhase, error) {
|
||||
err := k8sClient.Get(
|
||||
ctx,
|
||||
client.ObjectKey{Name: invalideEphemeralRunner.Name, Namespace: invalideEphemeralRunner.Namespace},
|
||||
updated,
|
||||
)
|
||||
Eventually(func() (corev1.PodPhase, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: invalideEphemeralRunner.Name, Namespace: invalideEphemeralRunner.Namespace}, updated)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
return updated.Status.Phase, nil
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(corev1.PodFailed))
|
||||
|
||||
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodFailed))
|
||||
Expect(updated.Status.Reason).Should(Equal("InvalidPod"))
|
||||
Expect(updated.Status.Message).Should(Equal("Failed to create the pod: pods \"invalid-ephemeral-runner\" is forbidden: no PriorityClass with name notexist was found"))
|
||||
})
|
||||
@@ -603,26 +528,44 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
).Should(BeEquivalentTo(""))
|
||||
})
|
||||
|
||||
It("It should eventually delete ephemeral runner after consecutive failures", func() {
|
||||
It("It should not re-create pod indefinitely", func() {
|
||||
updated := new(v1alpha1.EphemeralRunner)
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(Succeed(), "failed to get ephemeral runner")
|
||||
|
||||
failEphemeralRunnerPod := func() *corev1.Pod {
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Name: updated.Name, Namespace: updated.Namespace}, pod)
|
||||
func() (bool, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
|
||||
if err != nil {
|
||||
if kerrors.IsNotFound(err) && len(updated.Status.Failures) > 5 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
||||
Name: v1alpha1.EphemeralRunnerContainerName,
|
||||
State: corev1.ContainerState{
|
||||
Terminated: &corev1.ContainerStateTerminated{
|
||||
ExitCode: 1,
|
||||
},
|
||||
},
|
||||
})
|
||||
err = k8sClient.Status().Update(ctx, pod)
|
||||
Expect(err).To(BeNil(), "Failed to update pod status")
|
||||
return false, fmt.Errorf("pod haven't failed for 5 times.")
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(Succeed(), "failed to get ephemeral runner pod")
|
||||
).Should(BeEquivalentTo(true), "we should stop creating pod after 5 failures")
|
||||
|
||||
// In case we still have pod created due to controller-runtime cache delay, mark the container as exited
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
|
||||
if err == nil {
|
||||
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
||||
Name: v1alpha1.EphemeralRunnerContainerName,
|
||||
State: corev1.ContainerState{
|
||||
@@ -633,70 +576,25 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
})
|
||||
err := k8sClient.Status().Update(ctx, pod)
|
||||
Expect(err).To(BeNil(), "Failed to update pod status")
|
||||
|
||||
return pod
|
||||
}
|
||||
|
||||
for i := range 5 {
|
||||
pod := failEphemeralRunnerPod()
|
||||
|
||||
Eventually(
|
||||
func() (int, error) {
|
||||
// EphemeralRunner should failed with reason TooManyPodFailures
|
||||
Eventually(func() (string, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return "", err
|
||||
}
|
||||
return len(updated.Status.Failures), nil
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(i + 1))
|
||||
return updated.Status.Reason, nil
|
||||
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo("TooManyPodFailures"), "Reason should be TooManyPodFailures")
|
||||
|
||||
Eventually(
|
||||
func() error {
|
||||
nextPod := new(corev1.Pod)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: pod.Name, Namespace: pod.Namespace}, nextPod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if nextPod.UID != pod.UID {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("pod not recreated")
|
||||
},
|
||||
).WithTimeout(20*time.Second).WithPolling(10*time.Millisecond).Should(Succeed(), "pod should be recreated")
|
||||
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
pod := new(corev1.Pod)
|
||||
// EphemeralRunner should not have any pod
|
||||
Eventually(func() (bool, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
|
||||
if err != nil {
|
||||
return false, err
|
||||
if err == nil {
|
||||
return false, nil
|
||||
}
|
||||
for _, cs := range pod.Status.ContainerStatuses {
|
||||
if cs.Name == v1alpha1.EphemeralRunnerContainerName {
|
||||
return cs.State.Terminated == nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
},
|
||||
).WithTimeout(20*time.Second).WithPolling(10*time.Millisecond).Should(BeEquivalentTo(true), "pod should be terminated")
|
||||
}
|
||||
|
||||
failEphemeralRunnerPod()
|
||||
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
if kerrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
return false, err
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
|
||||
return kerrors.IsNotFound(err), nil
|
||||
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
|
||||
})
|
||||
|
||||
It("It should re-create pod on eviction", func() {
|
||||
@@ -745,6 +643,53 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
).Should(BeEquivalentTo(true))
|
||||
})
|
||||
|
||||
It("It should re-create pod on exit status 0, but runner exists within the service", func() {
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
||||
Name: v1alpha1.EphemeralRunnerContainerName,
|
||||
State: corev1.ContainerState{
|
||||
Terminated: &corev1.ContainerStateTerminated{
|
||||
ExitCode: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
err := k8sClient.Status().Update(ctx, pod)
|
||||
Expect(err).To(BeNil(), "failed to update pod status")
|
||||
|
||||
updated := new(v1alpha1.EphemeralRunner)
|
||||
Eventually(func() (bool, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(updated.Status.Failures) == 1, nil
|
||||
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
|
||||
|
||||
// should re-create after failure
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
pod := new(corev1.Pod)
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
})
|
||||
|
||||
It("It should not set the phase to succeeded without pod termination status", func() {
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(
|
||||
@@ -817,10 +762,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ResourceBuilder: ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: mgr.GetClient(),
|
||||
multiClient: fake.NewMultiClient(
|
||||
ActionsClient: fake.NewMultiClient(
|
||||
fake.WithDefaultClient(
|
||||
fake.NewFakeClient(
|
||||
fake.WithGetRunner(
|
||||
@@ -836,8 +778,6 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
nil,
|
||||
),
|
||||
),
|
||||
},
|
||||
},
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).To(BeNil(), "failed to setup controller")
|
||||
@@ -845,7 +785,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
startManagers(GinkgoT(), mgr)
|
||||
})
|
||||
|
||||
It("It should delete EphemeralRunner when pod exits successfully", func() {
|
||||
It("It should set the Phase to Succeeded", func() {
|
||||
ephemeralRunner := newExampleRunner("test-runner", autoscalingNS.Name, configSecret.Name)
|
||||
|
||||
err := k8sClient.Create(ctx, ephemeralRunner)
|
||||
@@ -871,18 +811,13 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
Expect(err).To(BeNil(), "failed to update pod status")
|
||||
|
||||
updated := new(v1alpha1.EphemeralRunner)
|
||||
Eventually(
|
||||
func() bool {
|
||||
err := k8sClient.Get(
|
||||
ctx,
|
||||
client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace},
|
||||
updated,
|
||||
)
|
||||
return kerrors.IsNotFound(err)
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeTrue())
|
||||
Eventually(func() (corev1.PodPhase, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
return updated.Status.Phase, nil
|
||||
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodSucceeded))
|
||||
})
|
||||
})
|
||||
|
||||
@@ -902,12 +837,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ResourceBuilder: ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: mgr.GetClient(),
|
||||
multiClient: fake.NewMultiClient(),
|
||||
},
|
||||
},
|
||||
ActionsClient: fake.NewMultiClient(),
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).To(BeNil(), "failed to setup controller")
|
||||
@@ -917,12 +847,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
|
||||
It("uses an actions client with proxy transport", func() {
|
||||
// Use an actual client
|
||||
controller.ResourceBuilder = ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: mgr.GetClient(),
|
||||
multiClient: actions.NewMultiClient(logr.Discard()),
|
||||
},
|
||||
}
|
||||
controller.ActionsClient = actions.NewMultiClient(logr.Discard())
|
||||
|
||||
proxySuccessfulllyCalled := false
|
||||
proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -1076,12 +1001,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ResourceBuilder: ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: mgr.GetClient(),
|
||||
multiClient: fake.NewMultiClient(),
|
||||
},
|
||||
},
|
||||
ActionsClient: fake.NewMultiClient(),
|
||||
}
|
||||
|
||||
err = controller.SetupWithManager(mgr)
|
||||
@@ -1112,16 +1032,11 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
server.StartTLS()
|
||||
|
||||
// Use an actual client
|
||||
controller.ResourceBuilder = ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: mgr.GetClient(),
|
||||
multiClient: actions.NewMultiClient(logr.Discard()),
|
||||
},
|
||||
}
|
||||
controller.ActionsClient = actions.NewMultiClient(logr.Discard())
|
||||
|
||||
ephemeralRunner := newExampleRunner("test-runner", autoScalingNS.Name, configSecret.Name)
|
||||
ephemeralRunner.Spec.GitHubConfigUrl = server.ConfigURLForOrg("my-org")
|
||||
ephemeralRunner.Spec.GitHubServerTLS = &v1alpha1.TLSConfig{
|
||||
ephemeralRunner.Spec.GitHubServerTLS = &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
|
||||
@@ -331,7 +331,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
|
||||
return false, nil
|
||||
}
|
||||
|
||||
actionsClient, err := r.GetActionsService(ctx, ephemeralRunnerSet)
|
||||
actionsClient, err := r.actionsClientFor(ctx, ephemeralRunnerSet)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -439,7 +439,7 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
|
||||
log.Info("No pending or running ephemeral runners running at this time for scale down")
|
||||
return nil
|
||||
}
|
||||
actionsClient, err := r.GetActionsService(ctx, ephemeralRunnerSet)
|
||||
actionsClient, err := r.actionsClientFor(ctx, ephemeralRunnerSet)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create actions client for ephemeral runner replica set: %w", err)
|
||||
}
|
||||
@@ -453,13 +453,8 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
|
||||
continue
|
||||
}
|
||||
|
||||
if !isDone && ephemeralRunner.HasJob() {
|
||||
log.Info(
|
||||
"Skipping ephemeral runner since it is running a job",
|
||||
"name", ephemeralRunner.Name,
|
||||
"workflowRunId", ephemeralRunner.Status.WorkflowRunId,
|
||||
"jobId", ephemeralRunner.Status.JobID,
|
||||
)
|
||||
if !isDone && ephemeralRunner.Status.JobRequestId > 0 {
|
||||
log.Info("Skipping ephemeral runner since it is running a job", "name", ephemeralRunner.Name, "jobRequestId", ephemeralRunner.Status.JobRequestId)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -507,6 +502,73 @@ func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ct
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) (actions.ActionsService, error) {
|
||||
secret := new(corev1.Secret)
|
||||
if err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: rs.Spec.EphemeralRunnerSpec.GitHubConfigSecret}, secret); err != nil {
|
||||
return nil, fmt.Errorf("failed to get secret: %w", err)
|
||||
}
|
||||
|
||||
opts, err := r.actionsClientOptionsFor(ctx, rs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get actions client options: %w", err)
|
||||
}
|
||||
|
||||
return r.ActionsClient.GetClientFromSecret(
|
||||
ctx,
|
||||
rs.Spec.EphemeralRunnerSpec.GitHubConfigUrl,
|
||||
rs.Namespace,
|
||||
secret.Data,
|
||||
opts...,
|
||||
)
|
||||
}
|
||||
|
||||
func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) ([]actions.ClientOption, error) {
|
||||
var opts []actions.ClientOption
|
||||
if rs.Spec.EphemeralRunnerSpec.Proxy != nil {
|
||||
proxyFunc, err := rs.Spec.EphemeralRunnerSpec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
|
||||
var secret corev1.Secret
|
||||
err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: s}, &secret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get secret %s: %w", s, err)
|
||||
}
|
||||
|
||||
return &secret, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get proxy func: %w", err)
|
||||
}
|
||||
|
||||
opts = append(opts, actions.WithProxy(proxyFunc))
|
||||
}
|
||||
|
||||
tlsConfig := rs.Spec.EphemeralRunnerSpec.GitHubServerTLS
|
||||
if tlsConfig != nil {
|
||||
pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) {
|
||||
var configmap corev1.ConfigMap
|
||||
err := r.Get(
|
||||
ctx,
|
||||
types.NamespacedName{
|
||||
Namespace: rs.Namespace,
|
||||
Name: name,
|
||||
},
|
||||
&configmap,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get configmap %s: %w", name, err)
|
||||
}
|
||||
|
||||
return []byte(configmap.Data[key]), nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get tls config: %w", err)
|
||||
}
|
||||
|
||||
opts = append(opts, actions.WithRootCAs(pool))
|
||||
}
|
||||
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
// SetupWithManager sets up the controller with the Manager.
|
||||
func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
@@ -22,7 +21,6 @@ import (
|
||||
"github.com/go-logr/logr"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
||||
@@ -37,10 +35,6 @@ const (
|
||||
ephemeralRunnerSetTestGitHubToken = "gh_token"
|
||||
)
|
||||
|
||||
func TestPrecomputedConstants(t *testing.T) {
|
||||
require.Equal(t, len(failedRunnerBackoff), maxFailures+1)
|
||||
}
|
||||
|
||||
var _ = Describe("Test EphemeralRunnerSet controller", func() {
|
||||
var ctx context.Context
|
||||
var mgr ctrl.Manager
|
||||
@@ -57,12 +51,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ResourceBuilder: ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: mgr.GetClient(),
|
||||
multiClient: fake.NewMultiClient(),
|
||||
},
|
||||
},
|
||||
ActionsClient: fake.NewMultiClient(),
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
@@ -1112,12 +1101,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ResourceBuilder: ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: mgr.GetClient(),
|
||||
multiClient: actions.NewMultiClient(logr.Discard()),
|
||||
},
|
||||
},
|
||||
ActionsClient: actions.NewMultiClient(logr.Discard()),
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
@@ -1416,12 +1400,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ResourceBuilder: ResourceBuilder{
|
||||
SecretResolver: &SecretResolver{
|
||||
k8sClient: mgr.GetClient(),
|
||||
multiClient: actions.NewMultiClient(logr.Discard()),
|
||||
},
|
||||
},
|
||||
ActionsClient: actions.NewMultiClient(logr.Discard()),
|
||||
}
|
||||
err = controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
@@ -1460,7 +1439,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
|
||||
EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
|
||||
GitHubConfigUrl: server.ConfigURLForOrg("my-org"),
|
||||
GitHubConfigSecret: configSecret.Name,
|
||||
GitHubServerTLS: &v1alpha1.TLSConfig{
|
||||
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
|
||||
@@ -1,12 +0,0 @@
package actionsgithubcom

type controllerError string

func (e controllerError) Error() string {
return string(e)
}

const (
retryableError = controllerError("retryable error")
fatalError = controllerError("fatal error")
)
@@ -5,20 +5,17 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"maps"
|
||||
"math"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
|
||||
"github.com/actions/actions-runner-controller/build"
|
||||
ghalistenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
|
||||
listenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
|
||||
"github.com/actions/actions-runner-controller/github/actions"
|
||||
"github.com/actions/actions-runner-controller/hash"
|
||||
"github.com/actions/actions-runner-controller/logging"
|
||||
"github.com/actions/actions-runner-controller/vault/azurekeyvault"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -74,7 +71,6 @@ func SetListenerEntrypoint(entrypoint string) {
|
||||
|
||||
type ResourceBuilder struct {
|
||||
ExcludeLabelPropagationPrefixes []string
|
||||
*SecretResolver
|
||||
}
|
||||
|
||||
// boolPtr returns a pointer to a bool value
|
||||
@@ -124,7 +120,6 @@ func (b *ResourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
|
||||
Spec: v1alpha1.AutoscalingListenerSpec{
|
||||
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
|
||||
GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
|
||||
VaultConfig: autoscalingRunnerSet.VaultConfig(),
|
||||
RunnerScaleSetId: runnerScaleSetId,
|
||||
AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
|
||||
AutoscalingRunnerSetName: autoscalingRunnerSet.Name,
|
||||
@@ -164,7 +159,7 @@ func (lm *listenerMetricsServerConfig) containerPort() (corev1.ContainerPort, er
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, appConfig *appconfig.AppConfig, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
|
||||
func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
|
||||
var (
|
||||
metricsAddr = ""
|
||||
metricsEndpoint = ""
|
||||
@@ -174,8 +169,30 @@ func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
|
||||
metricsEndpoint = metricsConfig.endpoint
|
||||
}
|
||||
|
||||
config := ghalistenerconfig.Config{
|
||||
var appID int64
|
||||
if id, ok := secret.Data["github_app_id"]; ok {
|
||||
var err error
|
||||
appID, err = strconv.ParseInt(string(id), 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert github_app_id to int: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
var appInstallationID int64
|
||||
if id, ok := secret.Data["github_app_installation_id"]; ok {
|
||||
var err error
|
||||
appInstallationID, err = strconv.ParseInt(string(id), 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert github_app_installation_id to int: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
config := listenerconfig.Config{
|
||||
ConfigureUrl: autoscalingListener.Spec.GitHubConfigUrl,
|
||||
AppID: appID,
|
||||
AppInstallationID: appInstallationID,
|
||||
AppPrivateKey: string(secret.Data["github_app_private_key"]),
|
||||
Token: string(secret.Data["github_token"]),
|
||||
EphemeralRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
|
||||
EphemeralRunnerSetName: autoscalingListener.Spec.EphemeralRunnerSetName,
|
||||
MaxRunners: autoscalingListener.Spec.MaxRunners,
|
||||
@@ -190,24 +207,6 @@ func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
|
||||
Metrics: autoscalingListener.Spec.Metrics,
|
||||
}
|
||||
|
||||
vault := autoscalingListener.Spec.VaultConfig
|
||||
if vault == nil {
|
||||
config.AppConfig = appConfig
|
||||
} else {
|
||||
config.VaultType = vault.Type
|
||||
config.VaultLookupKey = autoscalingListener.Spec.GitHubConfigSecret
|
||||
config.AzureKeyVaultConfig = &azurekeyvault.Config{
|
||||
TenantID: vault.AzureKeyVault.TenantID,
|
||||
ClientID: vault.AzureKeyVault.ClientID,
|
||||
URL: vault.AzureKeyVault.URL,
|
||||
CertificatePath: vault.AzureKeyVault.CertificatePath,
|
||||
}
|
||||
}
|
||||
|
||||
if err := config.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("invalid listener config: %w", err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := json.NewEncoder(&buf).Encode(config); err != nil {
|
||||
return nil, fmt.Errorf("failed to encode config: %w", err)
|
||||
@@ -224,7 +223,7 @@ func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
|
||||
func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
|
||||
listenerEnv := []corev1.EnvVar{
|
||||
{
|
||||
Name: "LISTENER_CONFIG_PATH",
|
||||
@@ -279,7 +278,9 @@ func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
|
||||
}
|
||||
|
||||
labels := make(map[string]string, len(autoscalingListener.Labels))
|
||||
maps.Copy(labels, autoscalingListener.Labels)
|
||||
for key, val := range autoscalingListener.Labels {
|
||||
labels[key] = val
|
||||
}
|
||||
|
||||
newRunnerScaleSetListenerPod := &corev1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@@ -428,7 +429,7 @@ func mergeListenerContainer(base, from *corev1.Container) {
|
||||
func (b *ResourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
|
||||
return &corev1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingListener.Name,
|
||||
Name: scaleSetListenerServiceAccountName(autoscalingListener),
|
||||
Namespace: autoscalingListener.Namespace,
|
||||
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
|
||||
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
|
||||
@@ -443,7 +444,7 @@ func (b *ResourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.
|
||||
rulesHash := hash.ComputeTemplateHash(&rules)
|
||||
newRole := &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingListener.Name,
|
||||
Name: scaleSetListenerRoleName(autoscalingListener),
|
||||
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
|
||||
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
|
||||
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
|
||||
@@ -477,7 +478,7 @@ func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
|
||||
|
||||
newRoleBinding := &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingListener.Name,
|
||||
Name: scaleSetListenerRoleName(autoscalingListener),
|
||||
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
|
||||
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
|
||||
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
|
||||
@@ -495,6 +496,25 @@ func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
|
||||
return newRoleBinding
|
||||
}
|
||||
|
||||
func (b *ResourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret) *corev1.Secret {
|
||||
dataHash := hash.ComputeTemplateHash(&secret.Data)
|
||||
|
||||
newListenerSecret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: scaleSetListenerSecretMirrorName(autoscalingListener),
|
||||
Namespace: autoscalingListener.Namespace,
|
||||
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
|
||||
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
|
||||
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
|
||||
"secret-data-hash": dataHash,
|
||||
}),
|
||||
},
|
||||
Data: secret.DeepCopy().Data,
|
||||
}
|
||||
|
||||
return newListenerSecret
|
||||
}
|
||||
|
||||
func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
|
||||
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
|
||||
if err != nil {
|
||||
@@ -547,7 +567,6 @@ func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
|
||||
Proxy: autoscalingRunnerSet.Spec.Proxy,
|
||||
GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
|
||||
PodTemplateSpec: autoscalingRunnerSet.Spec.Template,
|
||||
VaultConfig: autoscalingRunnerSet.VaultConfig(),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -569,7 +588,6 @@ func (b *ResourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.Epheme
|
||||
for key, val := range ephemeralRunnerSet.Annotations {
|
||||
annotations[key] = val
|
||||
}
|
||||
|
||||
annotations[AnnotationKeyPatchID] = strconv.Itoa(ephemeralRunnerSet.Spec.PatchID)
|
||||
return &v1alpha1.EphemeralRunner{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
@@ -618,7 +636,7 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
|
||||
FilterLabels(labels, LabelKeyRunnerTemplateHash),
|
||||
annotations,
|
||||
runner.Spec,
|
||||
secret.Data,
|
||||
runner.Status.RunnerJITConfig,
|
||||
)
|
||||
|
||||
objectMeta := metav1.ObjectMeta{
|
||||
@@ -671,17 +689,14 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
|
||||
return &newPod
|
||||
}
|
||||
|
||||
func (b *ResourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner, jitConfig *actions.RunnerScaleSetJitRunnerConfig) *corev1.Secret {
|
||||
func (b *ResourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner) *corev1.Secret {
|
||||
return &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: ephemeralRunner.Name,
|
||||
Namespace: ephemeralRunner.Namespace,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
jitTokenKey: []byte(jitConfig.EncodedJITConfig),
|
||||
"runnerName": []byte(jitConfig.Runner.Name),
|
||||
"runnerId": []byte(strconv.Itoa(jitConfig.Runner.Id)),
|
||||
"scaleSetId": []byte(strconv.Itoa(jitConfig.Runner.RunnerScaleSetId)),
|
||||
jitTokenKey: []byte(ephemeralRunner.Status.RunnerJITConfig),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -690,28 +705,44 @@ func scaleSetListenerConfigName(autoscalingListener *v1alpha1.AutoscalingListene
|
||||
return fmt.Sprintf("%s-config", autoscalingListener.Name)
|
||||
}
|
||||
|
||||
func hashSuffix(namespace, runnerGroup, configURL string) string {
|
||||
namespaceHash := hash.FNVHashString(namespace + "@" + runnerGroup + "@" + configURL)
|
||||
func scaleSetListenerName(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) string {
|
||||
namespaceHash := hash.FNVHashString(autoscalingRunnerSet.Namespace)
|
||||
if len(namespaceHash) > 8 {
|
||||
namespaceHash = namespaceHash[:8]
|
||||
}
|
||||
return namespaceHash
|
||||
return fmt.Sprintf("%v-%v-listener", autoscalingRunnerSet.Name, namespaceHash)
|
||||
}
|
||||
|
||||
func scaleSetListenerName(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) string {
|
||||
return fmt.Sprintf(
|
||||
"%v-%v-listener",
|
||||
autoscalingRunnerSet.Name,
|
||||
hashSuffix(
|
||||
autoscalingRunnerSet.Namespace,
|
||||
autoscalingRunnerSet.Spec.RunnerGroup,
|
||||
autoscalingRunnerSet.Spec.GitHubConfigUrl,
|
||||
),
|
||||
)
|
||||
func scaleSetListenerServiceAccountName(autoscalingListener *v1alpha1.AutoscalingListener) string {
|
||||
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
|
||||
if len(namespaceHash) > 8 {
|
||||
namespaceHash = namespaceHash[:8]
|
||||
}
|
||||
return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
|
||||
}
|
||||
|
||||
func scaleSetListenerRoleName(autoscalingListener *v1alpha1.AutoscalingListener) string {
|
||||
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
|
||||
if len(namespaceHash) > 8 {
|
||||
namespaceHash = namespaceHash[:8]
|
||||
}
|
||||
return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
|
||||
}
|
||||
|
||||
func scaleSetListenerSecretMirrorName(autoscalingListener *v1alpha1.AutoscalingListener) string {
|
||||
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
|
||||
if len(namespaceHash) > 8 {
|
||||
namespaceHash = namespaceHash[:8]
|
||||
}
|
||||
return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
|
||||
}
|
||||
|
||||
func proxyListenerSecretName(autoscalingListener *v1alpha1.AutoscalingListener) string {
|
||||
return autoscalingListener.Name + "-proxy"
|
||||
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
|
||||
if len(namespaceHash) > 8 {
|
||||
namespaceHash = namespaceHash[:8]
|
||||
}
|
||||
return fmt.Sprintf("%v-%v-listener-proxy", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
|
||||
}
|
||||
|
||||
func proxyEphemeralRunnerSetSecretName(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) string {
|
||||
|
||||
@@ -82,7 +82,12 @@ func TestLabelPropagation(t *testing.T) {
Name: "test",
},
}
listenerPod, err := b.newScaleSetListenerPod(listener, &corev1.Secret{}, listenerServiceAccount, nil)
listenerSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
}
listenerPod, err := b.newScaleSetListenerPod(listener, &corev1.Secret{}, listenerServiceAccount, listenerSecret, nil)
require.NoError(t, err)
assert.Equal(t, listenerPod.Labels, listener.Labels)

@@ -1,280 +0,0 @@
|
||||
package actionsgithubcom
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
|
||||
"github.com/actions/actions-runner-controller/github/actions"
|
||||
"github.com/actions/actions-runner-controller/vault"
|
||||
"github.com/actions/actions-runner-controller/vault/azurekeyvault"
|
||||
"golang.org/x/net/http/httpproxy"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
type SecretResolver struct {
|
||||
k8sClient client.Client
|
||||
multiClient actions.MultiClient
|
||||
}
|
||||
|
||||
type SecretResolverOption func(*SecretResolver)
|
||||
|
||||
func NewSecretResolver(k8sClient client.Client, multiClient actions.MultiClient, opts ...SecretResolverOption) *SecretResolver {
|
||||
if k8sClient == nil {
|
||||
panic("k8sClient must not be nil")
|
||||
}
|
||||
|
||||
secretResolver := &SecretResolver{
|
||||
k8sClient: k8sClient,
|
||||
multiClient: multiClient,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(secretResolver)
|
||||
}
|
||||
|
||||
return secretResolver
|
||||
}
|
||||
|
||||
type ActionsGitHubObject interface {
|
||||
client.Object
|
||||
GitHubConfigUrl() string
|
||||
GitHubConfigSecret() string
|
||||
GitHubProxy() *v1alpha1.ProxyConfig
|
||||
GitHubServerTLS() *v1alpha1.TLSConfig
|
||||
VaultConfig() *v1alpha1.VaultConfig
|
||||
VaultProxy() *v1alpha1.ProxyConfig
|
||||
}
|
||||
|
||||
func (sr *SecretResolver) GetAppConfig(ctx context.Context, obj ActionsGitHubObject) (*appconfig.AppConfig, error) {
|
||||
resolver, err := sr.resolverForObject(ctx, obj)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get resolver for object: %v", err)
|
||||
}
|
||||
|
||||
appConfig, err := resolver.appConfig(ctx, obj.GitHubConfigSecret())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to resolve app config: %v", err)
|
||||
}
|
||||
|
||||
return appConfig, nil
|
||||
}
|
||||
|
||||
func (sr *SecretResolver) GetActionsService(ctx context.Context, obj ActionsGitHubObject) (actions.ActionsService, error) {
|
||||
resolver, err := sr.resolverForObject(ctx, obj)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get resolver for object: %v", err)
|
||||
}
|
||||
|
||||
appConfig, err := resolver.appConfig(ctx, obj.GitHubConfigSecret())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to resolve app config: %v", err)
|
||||
}
|
||||
|
||||
var clientOptions []actions.ClientOption
|
||||
if proxy := obj.GitHubProxy(); proxy != nil {
|
||||
config := &httpproxy.Config{
|
||||
NoProxy: strings.Join(proxy.NoProxy, ","),
|
||||
}
|
||||
|
||||
if proxy.HTTP != nil {
|
||||
u, err := url.Parse(proxy.HTTP.Url)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse proxy http url %q: %w", proxy.HTTP.Url, err)
|
||||
}
|
||||
|
||||
if ref := proxy.HTTP.CredentialSecretRef; ref != "" {
|
||||
u.User, err = resolver.proxyCredentials(ctx, ref)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to resolve proxy credentials: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
config.HTTPProxy = u.String()
|
||||
}
|
||||
|
||||
if proxy.HTTPS != nil {
|
||||
u, err := url.Parse(proxy.HTTPS.Url)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse proxy https url %q: %w", proxy.HTTPS.Url, err)
|
||||
}
|
||||
|
||||
if ref := proxy.HTTPS.CredentialSecretRef; ref != "" {
|
||||
u.User, err = resolver.proxyCredentials(ctx, ref)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to resolve proxy credentials: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
config.HTTPSProxy = u.String()
|
||||
}
|
||||
|
||||
proxyFunc := func(req *http.Request) (*url.URL, error) {
|
||||
return config.ProxyFunc()(req.URL)
|
||||
}
|
||||
|
||||
clientOptions = append(clientOptions, actions.WithProxy(proxyFunc))
|
||||
}
|
||||
|
||||
tlsConfig := obj.GitHubServerTLS()
|
||||
if tlsConfig != nil {
|
||||
pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) {
|
||||
var configmap corev1.ConfigMap
|
||||
err := sr.k8sClient.Get(
|
||||
ctx,
|
||||
types.NamespacedName{
|
||||
Namespace: obj.GetNamespace(),
|
||||
Name: name,
|
||||
},
|
||||
&configmap,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get configmap %s: %w", name, err)
|
||||
}
|
||||
|
||||
return []byte(configmap.Data[key]), nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get tls config: %w", err)
|
||||
}
|
||||
|
||||
clientOptions = append(clientOptions, actions.WithRootCAs(pool))
|
||||
}
|
||||
|
||||
return sr.multiClient.GetClientFor(
|
||||
ctx,
|
||||
obj.GitHubConfigUrl(),
|
||||
appConfig,
|
||||
obj.GetNamespace(),
|
||||
clientOptions...,
|
||||
)
|
||||
}
|
||||
|
||||
func (sr *SecretResolver) resolverForObject(ctx context.Context, obj ActionsGitHubObject) (resolver, error) {
|
||||
vaultConfig := obj.VaultConfig()
|
||||
if vaultConfig == nil || vaultConfig.Type == "" {
|
||||
return &k8sResolver{
|
||||
namespace: obj.GetNamespace(),
|
||||
client: sr.k8sClient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var proxy *httpproxy.Config
|
||||
if vaultProxy := obj.VaultProxy(); vaultProxy != nil {
|
||||
p, err := vaultProxy.ToHTTPProxyConfig(func(s string) (*corev1.Secret, error) {
|
||||
var secret corev1.Secret
|
||||
err := sr.k8sClient.Get(ctx, types.NamespacedName{Name: s, Namespace: obj.GetNamespace()}, &secret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get secret %s: %w", s, err)
|
||||
}
|
||||
return &secret, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create proxy config: %v", err)
|
||||
}
|
||||
proxy = p
|
||||
}
|
||||
|
||||
switch vaultConfig.Type {
|
||||
case vault.VaultTypeAzureKeyVault:
|
||||
akv, err := azurekeyvault.New(azurekeyvault.Config{
|
||||
TenantID: vaultConfig.AzureKeyVault.TenantID,
|
||||
ClientID: vaultConfig.AzureKeyVault.ClientID,
|
||||
URL: vaultConfig.AzureKeyVault.URL,
|
||||
CertificatePath: vaultConfig.AzureKeyVault.CertificatePath,
|
||||
Proxy: proxy,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Azure Key Vault client: %v", err)
|
||||
}
|
||||
return &vaultResolver{
|
||||
vault: akv,
|
||||
}, nil
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown vault type %q", vaultConfig.Type)
|
||||
}
|
||||
}
|
||||
|
||||
type resolver interface {
|
||||
appConfig(ctx context.Context, key string) (*appconfig.AppConfig, error)
|
||||
proxyCredentials(ctx context.Context, key string) (*url.Userinfo, error)
|
||||
}
|
||||
|
||||
type k8sResolver struct {
|
||||
namespace string
|
||||
client client.Client
|
||||
}
|
||||
|
||||
func (r *k8sResolver) appConfig(ctx context.Context, key string) (*appconfig.AppConfig, error) {
|
||||
nsName := types.NamespacedName{
|
||||
Namespace: r.namespace,
|
||||
Name: key,
|
||||
}
|
||||
secret := new(corev1.Secret)
|
||||
if err := r.client.Get(
|
||||
ctx,
|
||||
nsName,
|
||||
secret,
|
||||
); err != nil {
|
||||
return nil, fmt.Errorf("failed to get kubernetes secret: %q", nsName.String())
|
||||
}
|
||||
|
||||
return appconfig.FromSecret(secret)
|
||||
}
|
||||
|
||||
func (r *k8sResolver) proxyCredentials(ctx context.Context, key string) (*url.Userinfo, error) {
|
||||
nsName := types.NamespacedName{Namespace: r.namespace, Name: key}
|
||||
secret := new(corev1.Secret)
|
||||
if err := r.client.Get(
|
||||
ctx,
|
||||
nsName,
|
||||
secret,
|
||||
); err != nil {
|
||||
return nil, fmt.Errorf("failed to get kubernetes secret: %q", nsName.String())
|
||||
}
|
||||
|
||||
return url.UserPassword(
|
||||
string(secret.Data["username"]),
|
||||
string(secret.Data["password"]),
|
||||
), nil
|
||||
}
|
||||
|
||||
type vaultResolver struct {
|
||||
vault vault.Vault
|
||||
}
|
||||
|
||||
func (r *vaultResolver) appConfig(ctx context.Context, key string) (*appconfig.AppConfig, error) {
|
||||
val, err := r.vault.GetSecret(ctx, key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to resolve secret: %v", err)
|
||||
}
|
||||
|
||||
return appconfig.FromJSONString(val)
|
||||
}
|
||||
|
||||
func (r *vaultResolver) proxyCredentials(ctx context.Context, key string) (*url.Userinfo, error) {
|
||||
val, err := r.vault.GetSecret(ctx, key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to resolve secret: %v", err)
|
||||
}
|
||||
|
||||
type info struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
}
|
||||
|
||||
var i info
|
||||
if err := json.Unmarshal([]byte(val), &i); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal info: %v", err)
|
||||
}
|
||||
|
||||
return url.UserPassword(i.Username, i.Password), nil
|
||||
}
|
||||
@@ -20,7 +20,6 @@ import (
"os"
"path/filepath"
"testing"
"time"

"github.com/onsi/ginkgo/config"

@@ -80,15 +79,6 @@ var _ = BeforeSuite(func() {
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())

failedRunnerBackoff = []time.Duration{
20 * time.Millisecond,
20 * time.Millisecond,
20 * time.Millisecond,
20 * time.Millisecond,
20 * time.Millisecond,
20 * time.Millisecond,
}
})

var _ = AfterSuite(func() {

@@ -130,7 +130,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
jobs, resp, err := ghc.Actions.ListWorkflowJobs(context.TODO(), user, repoName, runID, &opt)
if err != nil {
r.Log.Error(err, "Error listing workflow jobs")
return // err
return //err
}
allJobs = append(allJobs, jobs.Jobs...)
if resp.NextPage == 0 {

@@ -321,34 +321,26 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) findHRAsByKey(ctx con
defaultListOpts = append(defaultListOpts, client.InNamespace(ns))
}

// Get all HRAs since we can't use the index for repository/organization lookup anymore
var hras []v1alpha1.HorizontalRunnerAutoscaler

if value != "" {
opts := append([]client.ListOption{}, defaultListOpts...)
opts = append(opts, client.MatchingFields{scaleTargetKey: value})

if autoscaler.Namespace != "" {
opts = append(opts, client.InNamespace(autoscaler.Namespace))
}

var hraList v1alpha1.HorizontalRunnerAutoscalerList
if err := autoscaler.List(ctx, &hraList, defaultListOpts...); err != nil {

if err := autoscaler.List(ctx, &hraList, opts...); err != nil {
return nil, err
}

var matchingHRAs []v1alpha1.HorizontalRunnerAutoscaler

if value == "" {
return matchingHRAs, nil
hras = append(hras, hraList.Items...)
}

// For each HRA, resolve its ScaleTargetRef and check if it matches the requested value
for _, hra := range hraList.Items {
if hra.Spec.ScaleTargetRef.Name == "" {
continue
}

keys := autoscaler.getHRAKeys(ctx, &hra)
for _, key := range keys {
if key == value {
matchingHRAs = append(matchingHRAs, hra)
break
}
}
}

return matchingHRAs, nil
return hras, nil
}

type ScaleTarget struct {
@@ -718,36 +710,10 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) indexer(rawObj client
return nil
}

// Return a simple key based on the ScaleTargetRef to avoid client calls in indexer
// The actual repository/organization resolution will be done later when needed
kind := hra.Spec.ScaleTargetRef.Kind
if kind == "" {
kind = "RunnerDeployment" // default
}

key := fmt.Sprintf("%s/%s/%s", kind, hra.Namespace, hra.Spec.ScaleTargetRef.Name)
autoscaler.Log.V(2).Info(fmt.Sprintf("HRA indexed for HRA %s with key: %s", hra.Name, key))
return []string{key}
}

func enterpriseKey(name string) string {
return keyPrefixEnterprise + name
}

func organizationalRunnerGroupKey(owner, group string) string {
return owner + keyRunnerGroup + group
}

func enterpriseRunnerGroupKey(enterprise, group string) string {
return keyPrefixEnterprise + enterprise + keyRunnerGroup + group
}

// getHRAKeys resolves the ScaleTargetRef and returns the repository/organization keys for an HRA
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getHRAKeys(ctx context.Context, hra *v1alpha1.HorizontalRunnerAutoscaler) []string {
switch hra.Spec.ScaleTargetRef.Kind {
case "", "RunnerDeployment":
var rd v1alpha1.RunnerDeployment
if err := autoscaler.Get(ctx, types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerDeployment not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name))
return nil
}
@@ -770,10 +736,11 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getHRAKeys(ctx contex
keys = append(keys, enterpriseKey(enterprise)) // Enterprise runners
}
}
autoscaler.Log.V(2).Info(fmt.Sprintf("HRA keys indexed for HRA %s: %v", hra.Name, keys))
return keys
case "RunnerSet":
var rs v1alpha1.RunnerSet
if err := autoscaler.Get(ctx, types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerSet not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name))
return nil
}
@@ -794,8 +761,21 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getHRAKeys(ctx contex
keys = append(keys, enterpriseRunnerGroupKey(enterprise, rs.Spec.Group)) // Enterprise runner groups
}
}
autoscaler.Log.V(2).Info(fmt.Sprintf("HRA keys indexed for HRA %s: %v", hra.Name, keys))
return keys
}

return nil
}

func enterpriseKey(name string) string {
return keyPrefixEnterprise + name
}

func organizationalRunnerGroupKey(owner, group string) string {
return owner + keyRunnerGroup + group
}

func enterpriseRunnerGroupKey(enterprise, group string) string {
return keyPrefixEnterprise + enterprise + keyRunnerGroup + group
}

@@ -9,7 +9,6 @@ import (
"net/http/httptest"
"net/url"
"os"
"sync"
"testing"
"time"

@@ -420,14 +419,9 @@ func TestGetValidCapacityReservations(t *testing.T) {
func installTestLogger(webhook *HorizontalRunnerAutoscalerGitHubWebhook) *bytes.Buffer {
logs := &bytes.Buffer{}

// Wrap the buffer with a synchronized writer to prevent race conditions
syncWriter := &syncWriter{
writer: logs,
}

sink := &testLogSink{
name: "testlog",
writer: syncWriter,
writer: logs,
}

log := logr.New(sink)
@@ -523,18 +517,6 @@ func sendWebhook(server *httptest.Server, eventType string, event interface{}) (
return http.DefaultClient.Do(req)
}

// syncWriter wraps an io.Writer with a mutex for thread-safe writes
type syncWriter struct {
writer io.Writer
mu sync.Mutex
}

func (sw *syncWriter) Write(p []byte) (n int, err error) {
sw.mu.Lock()
defer sw.mu.Unlock()
return sw.writer.Write(p)
}

// testLogSink is a sample logr.Logger that logs in-memory.
// It's only for testing log outputs.
type testLogSink struct {

@@ -1046,12 +1046,12 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
// overridden
if ok, _ := envVarPresent("DOCKER_GROUP_GID", dockerdContainer.Env); !ok {
gid := d.DockerGID
// We default to gid 121 for Ubuntu 22.04 and 24.04 images
// We default to gid 121 for Ubuntu 22.04 images
// See below for more details
// - https://github.com/actions/actions-runner-controller/issues/2490#issuecomment-1501561923
// - https://github.com/actions/actions-runner-controller/blob/8869ad28bb5a1daaedefe0e988571fe1fb36addd/runner/actions-runner.ubuntu-20.04.dockerfile#L14
// - https://github.com/actions/actions-runner-controller/blob/8869ad28bb5a1daaedefe0e988571fe1fb36addd/runner/actions-runner.ubuntu-22.04.dockerfile#L12
if strings.Contains(runnerContainer.Image, "22.04") || strings.Contains(runnerContainer.Image, "24.04") {
if strings.Contains(runnerContainer.Image, "22.04") {
gid = "121"
} else if strings.Contains(runnerContainer.Image, "20.04") {
gid = "1001"

@@ -188,7 +188,7 @@ Create one using e.g. `eksctl`. You can refer to [the EKS documentation](https:/

Once you set up the service account, all you need is to add `serviceAccountName` and `fsGroup` to any pods that use the IAM-role enabled service account.

`fsGroup` needs to be set to the UID of the `runner` Linux user that runs the runner agent (and dockerd in case you use dind-runner). For anyone using an Ubuntu 20.04 runner image it's `1000` and for Ubuntu 22.04 and 24.04 one it's `1001`.
`fsGroup` needs to be set to the UID of the `runner` Linux user that runs the runner agent (and dockerd in case you use dind-runner). For anyone using an Ubuntu 20.04 runner image it's `1000` and for Ubuntu 22.04 one it's `1001`.

For `RunnerDeployment`, you can set those two fields under the runner spec at `RunnerDeployment.Spec.Template`:

@@ -200,12 +200,12 @@ metadata:
spec:
template:
spec:
repository: USER/REPO
repository: USER/REO
serviceAccountName: my-service-account
securityContext:
# For Ubuntu 20.04 runner
fsGroup: 1000
# Use 1001 for Ubuntu 22.04 and 24.04 runner
# Use 1001 for Ubuntu 22.04 runner
#fsGroup: 1001
```


@@ -43,53 +43,6 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h

## Changelog

### 0.13.0

1. Remove workflow actions version comments since upgrades are done via dependabot [#4161](https://github.com/actions/actions-runner-controller/pull/4161)
1. Fix image pull secrets list arguments in the chart [#4164](https://github.com/actions/actions-runner-controller/pull/4164)
1. Update example GitHub URLs in values.yaml to include an example for enterprise account-level runners [#4181](https://github.com/actions/actions-runner-controller/pull/4181)
1. docs: fix repo path typo [#4229](https://github.com/actions/actions-runner-controller/pull/4229)
1. Remove deprecated preserveUnknownFields from CRDs [#4135](https://github.com/actions/actions-runner-controller/pull/4135)
1. Add workflow name and target labels [#4240](https://github.com/actions/actions-runner-controller/pull/4240)
1. docs: fix broken Grafana dashboard JSON path [#4270](https://github.com/actions/actions-runner-controller/pull/4270)
1. Ensure ephemeral runner is deleted from the service on exit != 0 [#4260](https://github.com/actions/actions-runner-controller/pull/4260)
1. Remove JIT config from ephemeral runner status field [#4191](https://github.com/actions/actions-runner-controller/pull/4191)
1. Remove ephemeral runner when exit code != 0 and is patched with the job [#4239](https://github.com/actions/actions-runner-controller/pull/4239)
1. Bump the gomod group across 1 directory with 4 updates [#4277](https://github.com/actions/actions-runner-controller/pull/4277)
1. Bump all dependencies [#4266](https://github.com/actions/actions-runner-controller/pull/4266)


### 0.12.1

1. Fix indentation of startupProbe attributes in dind sidecar [#4126](https://github.com/actions/actions-runner-controller/pull/4126)
1. Remove duplicate float64 call [#4139](https://github.com/actions/actions-runner-controller/pull/4139)
1. Fix dind sidecar template [#4128](https://github.com/actions/actions-runner-controller/pull/4128)
1. Remove check if runner exists after exit code 0 [#4142](https://github.com/actions/actions-runner-controller/pull/4142)
1. Explicitly requeue during backoff ephemeral runner [#4152](https://github.com/actions/actions-runner-controller/pull/4152)

### 0.12.0

1. Allow use of client id as an app id [#4057](https://github.com/actions/actions-runner-controller/pull/4057)
1. Relax version requirements to allow patch version mismatch [#4080](https://github.com/actions/actions-runner-controller/pull/4080)
1. Refactor resource naming removing unnecessary calculations [#4076](https://github.com/actions/actions-runner-controller/pull/4076)
1. Fix busy runners metric [#4016](https://github.com/actions/actions-runner-controller/pull/4016)
1. Include more context to errors raised by github/actions client [#4032](https://github.com/actions/actions-runner-controller/pull/4032)
1. Revised dashboard [#4022](https://github.com/actions/actions-runner-controller/pull/4022)
1. feat(helm): move dind to sidecar [#3842](https://github.com/actions/actions-runner-controller/pull/3842)
1. Pin third party actions [#3981](https://github.com/actions/actions-runner-controller/pull/3981)
1. Fix docker lint warnings [#4074](https://github.com/actions/actions-runner-controller/pull/4074)
1. Bump the gomod group across 1 directory with 7 updates [#4008](https://github.com/actions/actions-runner-controller/pull/4008)
1. Bump go version [#4075](https://github.com/actions/actions-runner-controller/pull/4075)
1. Add job_workflow_ref label to listener metrics [#4054](https://github.com/actions/actions-runner-controller/pull/4054)
1. Bump github.com/cloudflare/circl from 1.6.0 to 1.6.1 [#4118](https://github.com/actions/actions-runner-controller/pull/4118)
1. Avoid nil point when config.Metrics is nil and expose all metrics if none are configured [#4101](https://github.com/actions/actions-runner-controller/pull/4101)
1. Bump github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 [#4120](https://github.com/actions/actions-runner-controller/pull/4120)
1. Add startup probe to dind side-car [#4117](https://github.com/actions/actions-runner-controller/pull/4117)
1. Delete config secret when listener pod gets deleted [#4033](https://github.com/actions/actions-runner-controller/pull/4033)
1. Add response body to error when fetching access token [#4005](https://github.com/actions/actions-runner-controller/pull/4005)
1. Azure Key Vault integration to resolve secrets [#4090](https://github.com/actions/actions-runner-controller/pull/4090)
1. Create backoff mechanism for failed runners and allow re-creation of failed ephemeral runners [#4059](https://github.com/actions/actions-runner-controller/pull/4059)

### 0.11.0

1. Add events role permission to leader_election_role [#3988](https://github.com/actions/actions-runner-controller/pull/3988)

File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,11 +1,6 @@

# Visualizing Autoscaling Runner Scale Set metrics with Grafana

With the metrics support introduced in [gha-runner-scale-set-0.5.0](https://github.com/actions/actions-runner-controller/releases/tag/gha-runner-scale-set-0.5.0), you can visualize the autoscaling behavior of your runner scale set with your tool of choice.

This sample dashboard shows how to visualize the metrics with [Grafana](https://grafana.com/).

> [!NOTE]
> We do not intend to provide a supported ARC dashboard. This is simply a reference and a demonstration of how you could leverage the metrics emitted by the controller-manager and listeners to visualize the autoscaling behavior of your runner scale set. We offer no promises of future upgrades to this sample.

With metrics introduced in [gha-runner-scale-set-0.5.0](https://github.com/actions/actions-runner-controller/releases/tag/gha-runner-scale-set-0.5.0), you can now visualize the autoscaling behavior of your runner scale set with your tool of choice. This sample shows how to visualize the metrics with [Grafana](https://grafana.com/).

## Demo

@@ -13,42 +8,11 @@ This sample dashboard shows how to visualize the metrics with [Grafana](https://

## Setup

We do not intend to provide a supported ARC dashboard. This is simply a reference and a demonstration of how you could leverage the metrics emitted by the controller-manager and listeners to visualize the autoscaling behavior of your runner scale set. We offer no promises of future upgrades to this sample.

1. Make sure to have [Grafana](https://grafana.com/docs/grafana/latest/installation/) and [Prometheus](https://prometheus.io/docs/prometheus/latest/installation/) running in your cluster.
2. Make sure that Prometheus is properly scraping the metrics endpoints of the controller-manager and listeners (a sample scrape configuration is sketched after this list).
3. Import the [dashboard](ARC-Autoscaling-Runner-Set-Monitoring.json) into Grafana.
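
The exact scrape setup depends on how Prometheus is deployed (plain configuration file, Prometheus Operator, and so on). The sketch below is a minimal `prometheus.yml` job for step 2; it assumes you annotate the controller-manager and listener pods with `prometheus.io/scrape: "true"`, which the charts do not do for you, so adapt the discovery and relabeling to your environment.

```yaml
scrape_configs:
  - job_name: "arc-metrics"                  # placeholder job name
    kubernetes_sd_configs:
      - role: pod
    relabel_configs:
      # Assumption: the controller-manager and listener pods carry the
      # prometheus.io/scrape: "true" annotation; replace this rule with
      # whatever selector you already use to pick scrape targets.
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
        action: keep
        regex: "true"
      # Expose the pod namespace as the `namespace` label used by the dashboard.
      - source_labels: [__meta_kubernetes_namespace]
        target_label: namespace
      # Map Kubernetes pod labels (e.g. actions.github.com/scale-set-name) to
      # Prometheus labels such as actions_github_com_scale_set_name.
      - action: labelmap
        regex: __meta_kubernetes_pod_label_(.+)
```

The `labelmap` rule is what turns pod labels such as `actions.github.com/scale-set-name` into the `actions_github_com_scale_set_name` label that the dashboard queries expect.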

## Required metrics

This sample relies on the suggested listener metrics configuration in the scale set [values.yaml](https://github.com/actions/actions-runner-controller/blob/ea27448da51385470b1ce67150aa695cfa45fd3f/charts/gha-runner-scale-set/values.yaml#L129-L270).

The following metrics are required to be scraped by Prometheus in order to populate the dashboard:

| Metric | Required labels | Source |
| ------ | --------------- | ------ |
| container_fs_writes_bytes_total | namespace | cAdvisor |
| container_fs_reads_bytes_total | namespace | cAdvisor |
| container_memory_working_set_bytes | namespace | cAdvisor |
| controller_runtime_active_workers | controller | ARC Controller |
| controller_runtime_reconcile_time_seconds_sum | namespace | ARC Controller |
| controller_runtime_reconcile_errors_total | namespace | ARC Controller |
| gha_assigned_jobs | actions_github_com_scale_set_name, namespace | ARC Controller |
| gha_controller_failed_ephemeral_runners | name, namespace | ARC Controller |
| gha_controller_pending_ephemeral_runners | name, namespace | ARC Controller |
| gha_controller_running_ephemeral_runners | name, namespace | ARC Controller |
| gha_controller_running_listeners | namespace | ARC Controller |
| gha_desired_runners | actions_github_com_scale_set_name, namespace | ARC Listener |
| gha_idle_runners | actions_github_com_scale_set_name, namespace | ARC Listener |
| gha_job_execution_duration_seconds_bucket | actions_github_com_scale_set_name, actions_github_com_scale_set_namespace | ARC Listener |
| gha_job_startup_duration_seconds_bucket | actions_github_com_scale_set_name, actions_github_com_scale_set_namespace | ARC Listener |
| gha_registered_runners | actions_github_com_scale_set_name, namespace | ARC Listener |
| gha_running_jobs | actions_github_com_scale_set_name, actions_github_com_scale_set_namespace | ARC Listener |
| kube_pod_container_status_ready | namespace | kube-state-metrics |
| kube_pod_container_status_terminated_reason | namespace, reason | kube-state-metrics |
| kube_pod_container_status_waiting | namespace | kube-state-metrics |
| rest_client_requests_total | code, method, namespace | ARC Controller |
| scrape_duration_seconds | | Prometheus |
| workqueue_depth | name, namespace | ARC Controller |
| workqueue_queue_duration_seconds_sum | namespace | ARC Controller |

3. Import the [dashboard](ARC-Autoscaling-Runner-Set-Monitoring_1692627561838.json) into Grafana.
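
If importing the JSON through the Grafana UI is not convenient (for example, when Grafana is managed declaratively), the same file can be loaded with Grafana's file-based dashboard provisioning. The sketch below is illustrative only; the provider name and mount path are placeholders, not anything shipped with ARC.

```yaml
# Hypothetical provisioning file, e.g. /etc/grafana/provisioning/dashboards/arc.yaml
apiVersion: 1
providers:
  - name: arc-sample-dashboards      # placeholder provider name
    type: file
    disableDeletion: false
    options:
      # Directory containing the dashboard JSON from this folder, e.g. mounted
      # from a ConfigMap when Grafana runs in the cluster.
      path: /var/lib/grafana/dashboards/arc
```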

## Details

@@ -58,25 +22,16 @@ The dashboard includes the following metrics:

| Label | Description |
| -------------------------------- | ---------------------------------------------------- |
| Startup Duration | Heat map of the wait time before a job starts, with the colors indicating the increase in the number of jobs in that time bucket. An increasing time can indicate that the cluster is resource constrained and may need additional nodes or resources to handle the load. |
| Execution Duration | Heat map of the execution time for a job, with the colors indicating the increase in the number of jobs in that time bucket. Time can be affected by the number of steps in the job, the allocated CPU, and whether there is resource contention on the node that is impacting performance. |
| Assigned Jobs | The number of jobs that have been assigned to the listener. This is the number of jobs that the listener is responsible for providing a runner to process. |
| Desired Runners | The number of runners that the listener is requesting from the controller. This is the number of runners required to process the assigned jobs and provide idle runners. It is limited by the configured maximum runner count for the scale set. |
| Idle Runners | The total number of ephemeral runners that are available to accept jobs across all selected scale sets. Keeping a pool of idle runners can enable a faster start time under load, but excessive idle runners will consume resources and can prevent nodes from scaling down. |
| Running Jobs | The number of runners that are currently processing jobs. |
| Failed Runners | The total number of ephemeral runners that have failed to properly start. This may require reviewing the custom resource and logs to identify and resolve the root causes. Common causes include resource issues and failure to pull the required image. |
| Listeners | The number of listeners currently running and attempting to manage jobs for the scale set. This should match the number of scale sets deployed. |
| Pending Runners | The total number of ephemeral runners that ARC has requested and is waiting for Kubernetes to provide in a running state. If the Kubernetes API server is responsive, this will typically match the number of runner pods that are in a pending state. This number includes requests for runner pods that have not yet been scheduled. When this number is higher than the number of runner pods in a pending state, it can indicate performance issues. |
| Registered Runners | The total number of ephemeral runners that have been successfully registered. |
| Active Runners | The total number of runners that are active and either available or processing jobs. |
| Out of Memory | The number of containers that have been terminated by the OOMKiller. This can indicate that the requests/limits for one or more pods on the node were configured improperly, allowing pods to request more memory than the node had available. |
| Peak Container Memory | The maximum amount of memory used by any container in a given namespace during the selected time. This can be used for tuning the memory limits for the pods and for alerts as containers get close to their limits. |
| Container I/O | Shows the number of bytes read and written to the container filesystem. This can be used to identify if the container is reading or writing a large amount of data to the filesystem, which can impact performance. |
| Container Pod Status | Shows the number of containers in each status (waiting, running, terminated, ready). This can be used to identify if there are a large number of containers that are failing to start or are in a waiting state. |
| Reconcile time | The time to perform a single reconciliation task from a controller's work queue. This metric reflects the time it takes for ARC to complete each step in the processing of creating, managing, and cleaning up runners. As this increases, it can indicate resource contention, processing delays, or delays from the API server. |
| Workqueue Queue Duration | The time items spent in the work queue for a controller before being processed. This is often related to the work queue depth; as the number of items increases, it can take an increasing amount of time for an item to be processed. |
| Active listeners | The number of listeners currently running and attempting to manage jobs for the scale set. This should match the number of scale sets deployed. |
| Runner States | Displays the number of runners in a given state. The finished and deleted states are not included in this panel. |
| Failed (total) | The total number of ephemeral runners that have failed to properly start. This may require reviewing the custom resource and logs to identify and resolve the root causes. Common causes include resource issues and failure to pull the required image. |
| Pending (total) | The total number of ephemeral runners that ARC has requested and is waiting for Kubernetes to provide in a running state. If the Kubernetes API server is responsive, this will typically match the number of runner pods that are in a pending state. This number includes requests for runner pods that have not yet been scheduled. When this number is higher than the number of runner pods in a pending state, it can indicate performance issues with the API server and resource contention. |
| Idle (total) | The total number of ephemeral runners that are available to accept jobs across all scale sets. Keeping a pool of idle runners can enable a faster start time under load, but excessive idle runners will consume resources and can prevent nodes from scaling down. |
| Total assigned jobs per listener | The number of workflow jobs acquired and assigned to the listener. The listener must provide supporting runners to complete these jobs. Once jobs are assigned, they cannot be delegated to other listeners and must be processed by the scale set or cancelled. |
| Assigned vs running jobs | Compares the number of jobs assigned against the number of runners that are currently processing jobs. When running jobs is less than assigned jobs, it can indicate that ARC is waiting on Kubernetes to provide and start additional runners. |
| Average startup duration | The average time in seconds between when jobs are assigned and when a runner accepts the job and begins processing. An increasing duration can indicate that the cluster has resource contention or a lack of available nodes for scheduling jobs. |
| Average execution duration | The average time in seconds that runners are taking to complete a job. Changes in this value reflect the efficiency of workflow jobs and the pod configuration. If the value is increasing without changes to the job, it can indicate resource contention or CPU throttling. |
| Reconciliation errors | Reconciliation is the process of a controller ensuring the desired state and actual state of the resources match. Each time an event occurs on a resource watched by the controller, the controller is required to indicate if the new state matches the desired state. Kubernetes adds a task to the work queue for the controller to perform this reconciliation. Errors indicate that the controller has not achieved the desired state and is requesting Kubernetes to queue another request for reconciliation. Ideally, this number remains close to zero. An increasing number can indicate resource contention or delays processing API server requests. This reflects Kubernetes resources that ARC is waiting on to be provided or to reach the necessary state. As a concrete example, ARC will request the creation of a secret prior to creating the pod. If the response indicates the secret is not immediately ready, ARC will requeue the reconciliation task with the error details, incrementing this count. |
| Workqueue depth | The number of tasks that Kubernetes has queued for the ARC controllers to process. This includes reconciliation requests and tasks initiated by the controller. Managing a runner requires multiple steps to prepare, create, update, and delete the runner, its resources, and the ARC custom resources. As each step is completed (or triggers reconciliation), new tasks are queued for processing. The controller will then use one or more workers to process these tasks in the order they were queued. As the depth increases, it indicates more tasks awaiting time from the controller. Growth indicates increasing work and may reflect Kubernetes resource contention or processing latencies. Each request for a new runner will result in multiple tasks being added to the work queue to prepare and create the runner and the related ARC custom resources. |
| Active Workers | The number of workers that are actively processing tasks in the work queue. If the queue is empty, then there may be no workers required to process the tasks. The number of workers for the ephemeral runner is configurable in the scale set values file. |
| API Calls | Shows the number of calls to the API server by status code and HTTP method. The method indicates the type of activity being performed, while the status code indicates the result of the activity. Error codes of 500 and above often indicate a Kubernetes issue. |
| Reconciliation time | A histogram reflecting the time in seconds to perform a single reconciliation task from the controller's work queue. A histogram counts the number of requests that are processed within a given bucket of time. This metric reflects the time it takes for ARC to complete each step in the processing of creating, managing, and cleaning up runners. As this increases, it can indicate resource contention or processing delays within Kubernetes or the API server. This panel shows an average, which may hide larger or smaller times that are occurring in the processing. |
| Workqueue depth | The number of tasks that Kubernetes queued for the ARC controllers to process. This includes reconciliation requests and tasks from ARC. ARC sequentially processes a work queue of single, small tasks to avoid concurrency issues. Managing a runner requires multiple steps to prepare, create, update, and delete the runner, its resources, and the ARC custom resources. As each step is completed (or triggers reconciliation), new tasks are queued for processing. As the depth increases, it indicates more tasks awaiting time from the controller. Growth indicates increasing work and may indicate Kubernetes resource contention or processing latencies. Each request for a new runner will result in multiple tasks being added to the work queue to prepare and create the runner and the related ARC custom resources. |
| Scrape Duration (seconds) | The amount of time required for Prometheus to read the configured metrics from components in the cluster. An increasing number may indicate a lack of resources for Prometheus and a risk of the process exceeding the configured timeout, leading to lost metrics data. |
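
As a rough illustration of how the panel values above map onto the required metrics, the recording rules below compute an average startup duration and an idle-runner count per scale set, assuming the histogram's `_sum` and `_count` series are scraped alongside the `_bucket` series listed earlier. The dashboard itself queries the raw series directly, so the group name, rule names, and 5m window are arbitrary placeholders.

```yaml
# Hypothetical Prometheus recording rules; not part of the sample dashboard.
groups:
  - name: arc-dashboard-sketch
    rules:
      # Average startup duration in seconds per scale set, analogous to the
      # "Average startup duration" panel.
      - record: arc:job_startup_duration_seconds:avg5m
        expr: |
          sum by (actions_github_com_scale_set_name) (rate(gha_job_startup_duration_seconds_sum[5m]))
            /
          sum by (actions_github_com_scale_set_name) (rate(gha_job_startup_duration_seconds_count[5m]))
      # Idle runners per scale set, analogous to the "Idle Runners" panel.
      - record: arc:idle_runners:sum
        expr: sum by (actions_github_com_scale_set_name) (gha_idle_runners)
```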