Compare commits


23 Commits

Author SHA1 Message Date
Nikola Jokic
f914f627f1 Fix workflow run in other tests, add logs 2023-12-07 14:32:41 +01:00
Nikola Jokic
2572fbcb1a Makefile: Renamed e2e-gha to gha-e2e 2023-12-07 14:32:40 +01:00
Nikola Jokic
416c9942f1 Fixed helper and default setup executed successfully 2023-12-07 14:32:40 +01:00
Nikola Jokic
a9b60e4565 Set -e 2023-12-07 14:32:40 +01:00
Nikola Jokic
82d4ab8936 Add env test 2023-12-07 14:32:39 +01:00
Nikola Jokic
20c8c49046 Progress on gh workflow run 2023-12-07 14:32:39 +01:00
Nikola Jokic
c4cee5a195 extract printing tests and add main invocation to tests where I failed to add it 2023-12-07 14:32:39 +01:00
Nikola Jokic
445ef94796 Include tests on proxy and cert 2023-12-07 14:32:38 +01:00
Nikola Jokic
819a9264a0 setup tests for single namespace and dind 2023-12-07 14:32:38 +01:00
Nikola Jokic
a9eb5b6c45 Include tests for default_setup 2023-12-07 14:32:38 +01:00
Nikola Jokic
b3d135408f Initial setup on test files and helper 2023-12-07 14:32:37 +01:00
dependabot[bot]
0bfa57ac50 Bump k8s.io/api from 0.28.3 to 0.28.4 (#3093)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2023-12-04 16:22:33 +01:00
dependabot[bot]
2831d658c4 Bump k8s.io/apimachinery from 0.28.3 to 0.28.4 (#3092)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-04 15:54:48 +01:00
dependabot[bot]
0f40f6ab26 Bump github.com/onsi/gomega from 1.29.0 to 1.30.0 (#3094)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2023-12-01 09:11:44 +01:00
Nikola Jokic
5347e2c2c8 ADR: Changing semantics of min runners to be min idle runners (#3040) 2023-11-30 11:59:10 +01:00
Adam Szaraniec
1cba9c7800 Fix typo in helm chart (#3104)
Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com>
2023-11-30 11:32:59 +01:00
Yusuke Kuoka
2c29cfb994 Bump legacy ARC chart's app version to v0.27.7 (#3008) 2023-11-27 08:03:58 +01:00
Marco Hanisch
4f89ac5878 Add configureable namespace to ServiceMonitors (#3105) 2023-11-27 14:05:48 +09:00
steve21168
64778a828e Add fuse-overlayfs to benefit from union filesystem (#3022) 2023-11-27 12:33:58 +09:00
Donal O'Brien
8e484637f9 Upgrade docker and docker compose in line with GH hosted runners (#3053)
Co-authored-by: Yusuke Kuoka <ykuoka@gmail.com>
2023-11-27 12:25:19 +09:00
Nikola Jokic
b202be712e Set actions client timeout to 5 minutes, add logging to client (#3103) 2023-11-24 17:04:21 +01:00
github-actions[bot]
fb11d3bfd0 Updates: container-hooks to v0.5.0 (#3099)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2023-11-23 15:49:10 +01:00
Toru Komatsu
7793e1974a Record a reason for pod failure in EphemeralRunner (#3074)
Signed-off-by: utam0k <k0ma@utam0k.jp>
2023-11-21 08:26:29 +01:00
35 changed files with 1234 additions and 265 deletions

View File

@@ -17,7 +17,7 @@ env:
PUSH_TO_REGISTRIES: true
TARGET_ORG: actions-runner-controller
TARGET_WORKFLOW: release-runners.yaml
DOCKER_VERSION: 20.10.23
DOCKER_VERSION: 24.0.7
concurrency:
group: ${{ github.workflow }}

View File

@@ -73,7 +73,7 @@ To make your development cycle faster, use the below command to update deploy an
# Makefile
VERSION=controller1 \
RUNNER_TAG=runner1 \
make acceptance/pull acceptance/kind docker-build acceptance/load acceptance/deploy
make acceptance/pull acceptance/kind docker-buildx acceptance/load acceptance/deploy
```
If you've already deployed actions-runner-controller and only want to recreate pods to use the newer image, you can run:

View File

@@ -300,6 +300,10 @@ acceptance/runner/startup:
e2e:
go test -count=1 -v -timeout 600s -run '^TestE2E$$' ./test/e2e
.PHONY: gha-e2e
gha-e2e:
bash hack/e2e-test.sh
# Upload release file to GitHub.
github-release: release
ghr ${VERSION} release/

View File

@@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.23.5
version: 0.23.7
# Used as the default manager tag value when no tag property is provided in the values.yaml
appVersion: 0.27.5
appVersion: 0.27.6
home: https://github.com/actions/actions-runner-controller

View File

@@ -8,154 +8,156 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
> _Default values are the defaults set in the charts `values.yaml`, some properties have default configurations in the code for when the property is omitted or invalid_
| Key | Description | Default |
|-----------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------|
| `labels` | Set labels to apply to all resources in the chart | |
| `replicaCount` | Set the number of controller pods | 1 |
| `webhookPort` | Set the containerPort for the webhook Pod | 9443 |
| `syncPeriod` | Set the period in which the controller reconciles the desired runners count | 1m |
| `enableLeaderElection` | Enable election configuration | true |
| `leaderElectionId` | Set the election ID for the controller group | |
| `githubEnterpriseServerURL` | Set the URL for a self-hosted GitHub Enterprise Server | |
| `githubURL` | Override GitHub URL to be used for GitHub API calls | |
| `githubUploadURL` | Override GitHub Upload URL to be used for GitHub API calls | |
| `runnerGithubURL` | Override GitHub URL to be used by runners during registration | |
| `logLevel` | Set the log level of the controller container | |
| `logFormat` | Set the log format of the controller. Valid options are "text" and "json" | text |
| `additionalVolumes` | Set additional volumes to add to the manager container | |
| `additionalVolumeMounts` | Set additional volume mounts to add to the manager container | |
| `authSecret.create` | Deploy the controller auth secret | false |
| `authSecret.name` | Set the name of the auth secret | controller-manager |
| `authSecret.annotations` | Set annotations for the auth Secret | |
| `authSecret.github_app_id` | The ID of your GitHub App. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_app_installation_id` | The ID of your GitHub App installation. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_app_private_key` | The multiline string of your GitHub App's private key. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_token` | Your chosen GitHub PAT token. **This can't be set at the same time as the `authSecret.github_app_*`** | |
| `authSecret.github_basicauth_username` | Username for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | |
| `authSecret.github_basicauth_password` | Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | |
| `dockerRegistryMirror` | The default Docker Registry Mirror used by runners. | |
| `hostNetwork` | The "hostNetwork" of the controller container | false |
| `dnsPolicy` | The "dnsPolicy" of the controller container | ClusterFirst |
| `image.repository` | The "repository/image" of the controller container | summerwind/actions-runner-controller |
| `image.tag` | The tag of the controller container | |
| `image.actionsRunnerRepositoryAndTag` | The "repository/image" of the actions runner container | summerwind/actions-runner:latest |
| `image.actionsRunnerImagePullSecrets` | Optional image pull secrets to be included in the runner pod's ImagePullSecrets | |
| `image.dindSidecarRepositoryAndTag` | The "repository/image" of the dind sidecar container | docker:dind |
| `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
| `metrics.serviceMonitor.enable` | Deploy a ServiceMonitor kind for use with prometheus-operator CRDs | false |
| `metrics.serviceMonitor.interval` | Configure the interval at which Prometheus should scrape the controller's metrics | 1m |
| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `Release.Namespace` (the default namespace of the helm chart). |
| `metrics.serviceMonitor.timeout` | Configure the timeout of Prometheus scraping | 30s |
| `metrics.serviceAnnotations` | Set annotations for the provisioned metrics service resource | |
| `metrics.port` | Set port of metrics service | 8443 |
| `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
| `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 |
| `metrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
| `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | |
| `fullnameOverride` | Override the full resource names | |
| `nameOverride` | Override the resource name prefix | |
| `serviceAccount.annotations` | Set annotations to the service account | |
| `serviceAccount.create` | Deploy the controller pod under a service account | true |
| `podAnnotations` | Set annotations for the controller pod | |
| `podLabels` | Set labels for the controller pod | |
| `serviceAccount.name` | Set the name of the service account | |
| `securityContext` | Set the security context for each container in the controller pod | |
| `podSecurityContext` | Set the security context to controller pod | |
| `service.annotations` | Set annotations for the provisioned webhook service resource | |
| `service.port` | Set controller service ports | |
| `service.type` | Set controller service type | |
| `topologySpreadConstraints` | Set the controller pod topologySpreadConstraints | |
| `nodeSelector` | Set the controller pod nodeSelector | |
| `resources` | Set the controller pod resources | |
| `affinity` | Set the controller pod affinity rules | |
| `podDisruptionBudget.enabled` | Enables a PDB to ensure HA of controller pods | false |
| `podDisruptionBudget.minAvailable` | Minimum number of pods that must be available after eviction | |
| `podDisruptionBudget.maxUnavailable` | Maximum number of pods that can be unavailable after eviction. Kubernetes 1.7+ required. | |
| `tolerations` | Set the controller pod tolerations | |
| `env` | Set environment variables for the controller container | |
| `priorityClassName` | Set the controller pod priorityClassName | |
| `scope.watchNamespace` | Tells the controller and the github webhook server which namespace to watch if `scope.singleNamespace` is true | `Release.Namespace` (the default namespace of the helm chart). |
| `scope.singleNamespace` | Limit the controller to watch a single namespace | false |
| `certManagerEnabled` | Enable cert-manager. If disabled you must set admissionWebHooks.caBundle and create TLS secrets manually | true |
| `runner.statusUpdateHook.enabled` | Use custom RBAC for runners (role, role binding and service account), this will enable reporting runner statuses | false |
| `admissionWebHooks.caBundle` | Base64-encoded PEM bundle containing the CA that signed the webhook's serving certificate | |
| `githubWebhookServer.logLevel` | Set the log level of the githubWebhookServer container | |
| `githubWebhookServer.logFormat` | Set the log format of the githubWebhookServer controller. Valid options are "text" and "json" | text |
| `githubWebhookServer.replicaCount` | Set the number of webhook server pods | 1 |
| `githubWebhookServer.useRunnerGroupsVisibility` | Enable supporting runner groups with custom visibility, you also need to set `githubWebhookServer.secret.enabled` to enable this feature. | false |
| `githubWebhookServer.enabled` | Deploy the webhook server pod | false |
| `githubWebhookServer.queueLimit` | Set the queue size limit in the githubWebhookServer | |
| `githubWebhookServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false |
| `githubWebhookServer.secret.create` | Deploy the webhook hook secret | false |
| `githubWebhookServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
| `githubWebhookServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
| `githubWebhookServer.imagePullSecrets` | Specifies the secret to be used when pulling the githubWebhookServer pod containers | |
| `githubWebhookServer.nameOverride` | Override the resource name prefix | |
| `githubWebhookServer.fullnameOverride` | Override the full resource names | |
| `githubWebhookServer.serviceAccount.create` | Deploy the githubWebhookServer under a service account | true |
| `githubWebhookServer.serviceAccount.annotations` | Set annotations for the service account | |
| `githubWebhookServer.serviceAccount.name` | Set the service account name | |
| `githubWebhookServer.podAnnotations` | Set annotations for the githubWebhookServer pod | |
| `githubWebhookServer.podLabels` | Set labels for the githubWebhookServer pod | |
| `githubWebhookServer.podSecurityContext` | Set the security context to githubWebhookServer pod | |
| `githubWebhookServer.securityContext` | Set the security context for each container in the githubWebhookServer pod | |
| `githubWebhookServer.resources` | Set the githubWebhookServer pod resources | |
| `githubWebhookServer.topologySpreadConstraints` | Set the githubWebhookServer pod topologySpreadConstraints | |
| `githubWebhookServer.nodeSelector` | Set the githubWebhookServer pod nodeSelector | |
| `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
| `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
| `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
| `githubWebhookServer.terminationGracePeriodSeconds` | Set the githubWebhookServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
| `githubWebhookServer.lifecycle` | Set the githubWebhookServer pod lifecycle hooks | `{}` |
| `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
| `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
| `githubWebhookServer.service.loadBalancerSourceRanges` | Set githubWebhookServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
| `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
| `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
| `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
| `githubWebhookServer.ingress.tls` | Set tls configuration for ingress | |
| `githubWebhookServer.ingress.ingressClassName` | Set ingress class name | |
| `githubWebhookServer.podDisruptionBudget.enabled` | Enables a PDB to ensure HA of githubwebhook pods | false |
| `githubWebhookServer.podDisruptionBudget.minAvailable` | Minimum number of pods that must be available after eviction | |
| `githubWebhookServer.podDisruptionBudget.maxUnavailable` | Maximum number of pods that can be unavailable after eviction. Kubernetes 1.7+ required. | |
| `actionsMetricsServer.logLevel` | Set the log level of the actionsMetricsServer container | |
| `actionsMetricsServer.logFormat` | Set the log format of the actionsMetricsServer controller. Valid options are "text" and "json" | text |
| `actionsMetricsServer.enabled` | Deploy the actions metrics server pod | false |
| `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the actions-metrics-server | false |
| `actionsMetricsServer.secret.create` | Deploy the webhook hook secret | false |
| `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | actions-metrics-server |
| `actionsMetricsServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
| `actionsMetricsServer.imagePullSecrets` | Specifies the secret to be used when pulling the actionsMetricsServer pod containers | |
| `actionsMetricsServer.nameOverride` | Override the resource name prefix | |
| `actionsMetricsServer.fullnameOverride` | Override the full resource names | |
| `actionsMetricsServer.serviceAccount.create` | Deploy the actionsMetricsServer under a service account | true |
| `actionsMetricsServer.serviceAccount.annotations` | Set annotations for the service account | |
| `actionsMetricsServer.serviceAccount.name` | Set the service account name | |
| `actionsMetricsServer.podAnnotations` | Set annotations for the actionsMetricsServer pod | |
| `actionsMetricsServer.podLabels` | Set labels for the actionsMetricsServer pod | |
| `actionsMetricsServer.podSecurityContext` | Set the security context to actionsMetricsServer pod | |
| `actionsMetricsServer.securityContext` | Set the security context for each container in the actionsMetricsServer pod | |
| `actionsMetricsServer.resources` | Set the actionsMetricsServer pod resources | |
| `actionsMetricsServer.topologySpreadConstraints` | Set the actionsMetricsServer pod topologySpreadConstraints | |
| `actionsMetricsServer.nodeSelector` | Set the actionsMetricsServer pod nodeSelector | |
| `actionsMetricsServer.tolerations` | Set the actionsMetricsServer pod tolerations | |
| `actionsMetricsServer.affinity` | Set the actionsMetricsServer pod affinity rules | |
| `actionsMetricsServer.priorityClassName` | Set the actionsMetricsServer pod priorityClassName | |
| `actionsMetricsServer.terminationGracePeriodSeconds` | Set the actionsMetricsServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
| `actionsMetricsServer.lifecycle` | Set the actionsMetricsServer pod lifecycle hooks | `{}` |
| `actionsMetricsServer.service.type` | Set actionsMetricsServer service type | |
| `actionsMetricsServer.service.ports` | Set actionsMetricsServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
| `actionsMetricsServer.service.loadBalancerSourceRanges` | Set actionsMetricsServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
| `actionsMetricsServer.ingress.enabled` | Deploy an ingress kind for the actionsMetricsServer | false |
| `actionsMetricsServer.ingress.annotations` | Set annotations for the ingress kind | |
| `actionsMetricsServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
| `actionsMetricsServer.ingress.tls` | Set tls configuration for ingress | |
| `actionsMetricsServer.ingress.ingressClassName` | Set ingress class name | |
| `actionsMetrics.serviceMonitor.enable` | Deploy a ServiceMonitor kind for use with prometheus-operator CRDs | false |
| `actionsMetrics.serviceMonitor.interval` | Configure the interval at which Prometheus should scrape the controller's metrics | 1m |
| `actionsMetrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `Release.Namespace` (the default namespace of the helm chart). |
| `actionsMetrics.serviceMonitor.timeout` | Configure the timeout of Prometheus scraping | 30s |
| `actionsMetrics.serviceAnnotations` | Set annotations for the provisioned actions metrics service resource | |
| `actionsMetrics.port` | Set port of actions metrics service | 8443 |
| `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
| `actionsMetrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 |

View File

@@ -1,4 +1,5 @@
{{- if and .Values.actionsMetricsServer.enabled .Values.actionsMetrics.serviceMonitor.enable }}
{{- $servicemonitornamespace := .Values.actionsMetrics.serviceMonitor.namespace | default .Release.Namespace }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
@@ -8,7 +9,7 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "actions-runner-controller-actions-metrics-server.serviceMonitorName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ $servicemonitornamespace }}
spec:
endpoints:
- path: /metrics

View File

@@ -1,4 +1,5 @@
{{- if and .Values.githubWebhookServer.enabled .Values.metrics.serviceMonitor.enable }}
{{- $servicemonitornamespace := .Values.actionsMetrics.serviceMonitor.namespace | default .Release.Namespace }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
@@ -8,7 +9,7 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "actions-runner-controller-github-webhook-server.serviceMonitorName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ $servicemonitornamespace }}
spec:
endpoints:
- path: /metrics

View File

@@ -111,6 +111,7 @@ metrics:
serviceAnnotations: {}
serviceMonitor:
enable: false
namespace: ""
timeout: 30s
interval: 1m
serviceMonitorLabels: {}
@@ -312,6 +313,7 @@ actionsMetrics:
# to deploy the actions-metrics-server whose k8s service is referenced by the service monitor.
serviceMonitor:
enable: false
namespace: ""
timeout: 30s
interval: 1m
serviceMonitorLabels: {}

View File

@@ -102,7 +102,7 @@ flags:
## Defines how the controller should handle upgrades while having running jobs.
##
## The srategies available are:
## The strategies available are:
## - "immediate": (default) The controller will immediately apply the change causing the
## recreation of the listener and ephemeral runner set. This can lead to an
## overprovisioning of runners, if there are pending / running jobs. This should not

View File

@@ -191,7 +191,8 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
case len(ephemeralRunner.Status.Failures) > 5:
log.Info("EphemeralRunner has failed more than 5 times. Marking it as failed")
if err := r.markAsFailed(ctx, ephemeralRunner, log); err != nil {
errMessage := fmt.Sprintf("Pod has failed to start more than 5 times: %s", pod.Status.Message)
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, log); err != nil {
log.Error(err, "Failed to set ephemeral runner to phase Failed")
return ctrl.Result{}, err
}
@@ -423,12 +424,12 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
return false, multierr.Combine(errs...)
}
func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, errMessage string, log logr.Logger) error {
log.Info("Updating ephemeral runner status to Failed")
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = corev1.PodFailed
obj.Status.Reason = "TooManyPodFailures"
obj.Status.Message = "Pod has failed to start more than 5 times"
obj.Status.Message = errMessage
}); err != nil {
return fmt.Errorf("failed to update ephemeral runner status Phase/Message: %v", err)
}

View File

@@ -1,6 +1,6 @@
# Customize listener pod
**Status**: Proposed
**Status**: Done
## Context

View File

@@ -0,0 +1,19 @@
# Changing semantics of the `minRunners` field
**Status**: Proposed
## Context
The current implementation treats the `minRunners` field as the number of runners that should be running on your cluster; they can be busy running a job, starting up, or idle. This ensures faster cold-start times when workflows are acquired, while using the minimum number of runners needed to fulfill the scaling requirement.
However, especially large and busy clusters could benefit from treating `minRunners` as the minimum number of idle runners. When jobs come in large batches, the `AutoscalingRunnerSet` should pre-emptively increase the number of idle runners to further decrease the startup time for the next batch. In that scenario, the number of runners to create should be calculated as the number of assigned jobs plus `minRunners`.
## Decision
We will redefine the minRunners field to represent the minimum number of idle runners instead. The total number of runners would then be the sum of jobs assigned to the scale set and the minRunners value. If the maxRunners field is set, the desired number of runners will be the lesser of maxRunners and the sum of minRunners and the number of jobs.
The change in behavior is completely internal; it does not require any modifications on the user's side.
## Consequences
Changing the semantics of the `minRunners` field should result in faster job startup times on spikes as well as on cold startups.
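
To make the arithmetic above concrete, here is a minimal sketch of the proposed semantics. This is not the controller's actual code; the helper name and the treatment of an unset `maxRunners` (negative value means "no cap") are assumptions for illustration only:

```go
package main

import "fmt"

// desiredRunners sketches the proposed semantics: minRunners counts idle
// runners on top of assigned jobs, and maxRunners caps the total when set.
// A negative maxRunners is treated as "no cap" purely for this example.
func desiredRunners(assignedJobs, minRunners, maxRunners int) int {
	desired := assignedJobs + minRunners
	if maxRunners >= 0 && desired > maxRunners {
		return maxRunners
	}
	return desired
}

func main() {
	fmt.Println(desiredRunners(10, 5, 12)) // 12: capped by maxRunners
	fmt.Println(desiredRunners(10, 5, -1)) // 15: 10 busy + 5 idle, no cap
}
```

Under the new semantics, `minRunners = 5` with 10 assigned jobs yields 15 runners (10 busy plus 5 idle held in reserve for the next batch), whereas the old semantics would settle on 10.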

View File

@@ -7,9 +7,9 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net/http"
"net/url"
"strconv"
@@ -57,6 +57,28 @@ type ActionsService interface {
SetUserAgent(info UserAgentInfo)
}
type clientLogger struct {
logr.Logger
}
func (l *clientLogger) Info(msg string, keysAndValues ...interface{}) {
l.Logger.Info(msg, keysAndValues...)
}
func (l *clientLogger) Debug(msg string, keysAndValues ...interface{}) {
// discard debug log
}
func (l *clientLogger) Error(msg string, keysAndValues ...interface{}) {
l.Logger.Error(errors.New(msg), "Retryable client error", keysAndValues...)
}
func (l *clientLogger) Warn(msg string, keysAndValues ...interface{}) {
l.Logger.Info(msg, keysAndValues...)
}
var _ retryablehttp.LeveledLogger = &clientLogger{}
type Client struct {
*http.Client
@@ -168,11 +190,13 @@ func NewClient(githubConfigURL string, creds *ActionsAuth, options ...ClientOpti
}
retryClient := retryablehttp.NewClient()
retryClient.Logger = log.New(io.Discard, "", log.LstdFlags)
retryClient.Logger = &clientLogger{Logger: ac.logger}
retryClient.RetryMax = ac.retryMax
retryClient.RetryWaitMax = ac.retryWaitMax
retryClient.HTTPClient.Timeout = 5 * time.Minute // timeout must be > 1m to accommodate long polling
transport, ok := retryClient.HTTPClient.Transport.(*http.Transport)
if !ok {
// this should always be true, because retryablehttp.NewClient() uses

go.mod (7 changed lines)
View File

@@ -18,7 +18,7 @@ require (
github.com/kelseyhightower/envconfig v1.4.0
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.13.1
github.com/onsi/gomega v1.29.0
github.com/onsi/gomega v1.30.0
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.17.0
github.com/stretchr/testify v1.8.4
@@ -30,8 +30,8 @@ require (
golang.org/x/sync v0.5.0
gomodules.xyz/jsonpatch/v2 v2.4.0
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.28.3
k8s.io/apimachinery v0.28.3
k8s.io/api v0.28.4
k8s.io/apimachinery v0.28.4
k8s.io/client-go v0.28.3
sigs.k8s.io/controller-runtime v0.16.3
sigs.k8s.io/yaml v1.4.0
@@ -74,7 +74,6 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/moby/spdystream v0.2.0 // indirect

go.sum (104 changed lines)
View File

@@ -7,7 +7,6 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo=
github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
@@ -19,10 +18,7 @@ github.com/bradleyfalzon/ghinstallation/v2 v2.8.0/go.mod h1:fmPmvCiBWhJla3zDv9ZT
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cloudflare/circl v1.3.5 h1:g+wWynZqVALYAlpSQFAa7TscDnUK8mKYtrxMpw6AUKo=
github.com/cloudflare/circl v1.3.5/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cloudflare/circl v1.3.6 h1:/xbKIqSHbZXHwkhbrhrt2YOHIwYJlXH94E3tI/gDlUg=
github.com/cloudflare/circl v1.3.6/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
@@ -41,8 +37,6 @@ github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
@@ -50,13 +44,8 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 h1:skJKxRtNmevLqnayafdLe2AsenqRupVmzZSqrvb5caU=
github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo=
github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
@@ -108,18 +97,10 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ=
github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0=
github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20231101202521-4ca4178f5c7a h1:fEBsGL/sjAuJrgah5XqmmYsTLzJp/TO9Lhy39gkverk=
github.com/google/pprof v0.0.0-20231101202521-4ca4178f5c7a/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
@@ -135,8 +116,6 @@ github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxC
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -155,7 +134,6 @@ github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -171,8 +149,6 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA
github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 h1:ofNAzWCcyTALn2Zv40+8XitdzCgXY6e9qvXwN9W0YXg=
github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
@@ -192,17 +168,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
github.com/onsi/ginkgo/v2 v2.13.1 h1:LNGfMbR2OVGBfXjvRZIZ2YCTQdGKtPLvuI1rMCCj3OU=
github.com/onsi/ginkgo/v2 v2.13.1/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -213,8 +184,6 @@ github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
@@ -239,7 +208,6 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
@@ -252,16 +220,11 @@ github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -270,55 +233,38 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY=
golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0=
golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0=
golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -334,19 +280,14 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -354,8 +295,6 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -365,25 +304,17 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.4.0 h1:Z81tqI5ddIoXDPvVQ7/7CC9TnLM7ubaFG2qXYd5BbYY=
golang.org/x/time v0.4.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8=
golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -405,7 +336,6 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -421,47 +351,27 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw=
k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg=
k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM=
k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc=
k8s.io/apiextensions-apiserver v0.28.2 h1:J6/QRWIKV2/HwBhHRVITMLYoypCoPY1ftigDM0Kn+QU=
k8s.io/apiextensions-apiserver v0.28.2/go.mod h1:5tnkxLGa9nefefYzWuAlWZ7RZYuN/765Au8cWLA6SRg=
k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY=
k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0=
k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2EtmY08=
k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc=
k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ=
k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU=
k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A=
k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8=
k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY=
k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY=
k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8=
k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg=
k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4=
k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo=
k8s.io/component-base v0.28.2 h1:Yc1yU+6AQSlpJZyvehm/NkJBII72rzlEsd6MkBQ+G0E=
k8s.io/component-base v0.28.2/go.mod h1:4IuQPQviQCg3du4si8GpMrhAIegxpsgPngPRR/zWpzc=
k8s.io/component-base v0.28.3 h1:rDy68eHKxq/80RiMb2Ld/tbH8uAE75JdCqJyi6lXMzI=
k8s.io/component-base v0.28.3/go.mod h1:fDJ6vpVNSk6cRo5wmDa6eKIG7UlIQkaFmZN2fYgIUD8=
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/kube-openapi v0.0.0-20231113174909-778a5567bc1e h1:snPmy96t93RredGRjKfMFt+gvxuVAncqSAyBveJtr4Q=
k8s.io/kube-openapi v0.0.0-20231113174909-778a5567bc1e/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.16.2 h1:mwXAVuEk3EQf478PQwQ48zGOXvW27UJc8NHktQVuIPU=
sigs.k8s.io/controller-runtime v0.16.2/go.mod h1:vpMu3LpI5sYWtujJOa2uPK61nB5rbwlN7BAB8aSLvGU=
sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4=
sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk=
sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

hack/e2e-test.sh Executable file
View File

@@ -0,0 +1,92 @@
#!/bin/bash
DIR="$(dirname "${BASH_SOURCE[0]}")"
DIR="$(realpath "${DIR}")"
TEST_DIR="$(realpath "${DIR}/../test/actions.github.com")"
export PLATFORMS="linux/amd64"
TARGETS=()
function set_targets() {
local cases="$(find "${TEST_DIR}" -name '*.test.sh' | sed "s#^${TEST_DIR}/##g" )"
mapfile -t TARGETS < <(echo "${cases}")
echo "${TARGETS[@]}"
}
function env_test() {
if [[ -z "${GITHUB_TOKEN}" ]]; then
echo "Error: GITHUB_TOKEN is not set"
exit 1
fi
if [[ -z "${TARGET_ORG}" ]]; then
echo "Error: TARGET_ORG is not set"
exit 1
fi
if [[ -z "${TARGET_REPO}" ]]; then
echo "Error: TARGET_REPO is not set"
exit 1
fi
}
function usage() {
echo "Usage: $0 [test_name]"
echo " test_name: the name of the test to run"
echo " if not specified, all tests will be run"
echo " test_name should be the name of the test file without the .test.sh suffix"
echo ""
exit 1
}
function main() {
local failed=()
env_test
if [[ -z "${1}" ]]; then
echo "Running all tests"
set_targets
elif [[ -f "${TEST_DIR}/${1}.test.sh" ]]; then
echo "Running test ${1}"
TARGETS=("${1}.test.sh")
else
usage
fi
for target in "${TARGETS[@]}"; do
echo "============================================================"
test="${TEST_DIR}/${target}"
if [[ ! -x "${test}" ]]; then
echo "Error: test ${test} is not executable or not found"
failed+=("${test}")
continue
fi
echo "Running test ${target}"
if ! "${test}"; then
failed+=("${target}")
echo "---------------------------------"
echo "FAILED: ${target}"
else
echo "---------------------------------"
echo "PASSED: ${target}"
fi
echo "============================================================"
done
if [[ "${#failed[@]}" -gt 0 ]]; then
echo "Failed tests:"
for fail in "${failed[@]}"; do
echo " ${fail}"
done
exit 1
fi
}
main "$@"
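For reference, a minimal invocation sketch for the script above. The environment variable names match env_test; the org, repo, and token values are placeholders, and default_setup is only an example of a *.test.sh file name.
# Run every *.test.sh under test/actions.github.com (placeholder values):
GITHUB_TOKEN="ghp_example" TARGET_ORG="my-org" TARGET_REPO="my-repo" hack/e2e-test.sh
# Run a single test by file name without the .test.sh suffix (example name):
GITHUB_TOKEN="ghp_example" TARGET_ORG="my-org" TARGET_REPO="my-repo" hack/e2e-test.sh default_setup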

View File

@@ -7,8 +7,8 @@ OS_IMAGE ?= ubuntu-22.04
TARGETPLATFORM ?= $(shell arch)
RUNNER_VERSION ?= 2.311.0
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.4.0
DOCKER_VERSION ?= 20.10.23
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.5.0
DOCKER_VERSION ?= 24.0.7
# default list of platforms for which multiarch image is built
ifeq (${PLATFORMS}, )

View File

@@ -1,2 +1,2 @@
RUNNER_VERSION=2.311.0
RUNNER_CONTAINER_HOOKS_VERSION=0.4.0
RUNNER_CONTAINER_HOOKS_VERSION=0.5.0

View File

@@ -5,7 +5,7 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ENV CHANNEL=stable
ARG DOCKER_COMPOSE_VERSION=v2.20.0
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
# Other arguments

View File

@@ -5,7 +5,7 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ENV CHANNEL=stable
ARG DOCKER_COMPOSE_VERSION=v2.20.0
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
ARG RUNNER_USER_UID=1001
@@ -30,6 +30,7 @@ RUN apt-get update -y \
uidmap \
unzip \
zip \
fuse-overlayfs \
&& rm -rf /var/lib/apt/lists/*
# Download latest git-lfs version

View File

@@ -5,8 +5,8 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=20.10.23
ARG DOCKER_COMPOSE_VERSION=v2.20.0
ARG DOCKER_VERSION=24.0.7
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
# Use 1001 and 121 for compatibility with GitHub-hosted runners

View File

@@ -5,8 +5,8 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=20.10.23
ARG DOCKER_COMPOSE_VERSION=v2.20.0
ARG DOCKER_VERSION=24.0.7
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
ARG RUNNER_USER_UID=1001
ARG DOCKER_GROUP_GID=121

View File

@@ -5,8 +5,8 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=20.10.23
ARG DOCKER_COMPOSE_VERSION=v2.20.0
ARG DOCKER_VERSION=24.0.7
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
# Use 1001 and 121 for compatibility with GitHub-hosted runners

View File

@@ -5,8 +5,8 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=20.10.23
ARG DOCKER_COMPOSE_VERSION=v2.20.0
ARG DOCKER_VERSION=24.0.7
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
ARG RUNNER_USER_UID=1001
ARG DOCKER_GROUP_GID=121

View File

@@ -0,0 +1,84 @@
#!/bin/bash
set -e
DIR="$(dirname "${BASH_SOURCE[0]}")"
DIR="$(realpath "${DIR}")"
ROOT_DIR="$(ralpath "${DIR}/../../")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="anonymous-proxy-$(date '+%M%S')$((($RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_VERSION}" \
${ROOT_DIR}/charts/gha-runner-scale-set-controller \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set proxy.https.url="http://host.minikube.internal:3128" \
--set "proxy.noProxy[0]=10.96.0.1:443" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function run_squid() {
echo "Running squid"
docker run -d \
--name squid \
--publish 3128:3128 \
ubuntu/squid:latest
}
function main() {
local failed=()
build_image
create_cluster
install_arc
run_squid
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
delete_cluster
print_results "${failed[@]}"
}
main
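As a side note, the squid container started by run_squid can be sanity-checked from the host before the Helm install; this is only a sketch and not part of the test, with api.github.com used as an arbitrary HTTPS target.
# Verify the anonymous proxy on localhost:3128 forwards HTTPS requests:
curl -fsS -x http://localhost:3128 https://api.github.com/ > /dev/null && echo "proxy OK"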

View File

@@ -0,0 +1,91 @@
#!/bin/bash
set -e
DIR="$(dirname "${BASH_SOURCE[0]}")"
DIR="$(realpath "${DIR}")"
ROOT_DIR="$(ralpath "${DIR}/../../")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="auth-proxy-$(date '+%M%S')$((($RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_VERSION}" \
${ROOT_DIR}/charts/gha-runner-scale-set-controller \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Creating namespace ${SCALE_SET_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}" || true
echo "Installing proxy secret"
kubectl create secret generic proxy-auth \
--namespace="${SCALE_SET_NAMESPACE}" \
--from-literal=username=github \
--from-literal=password='actions'
echo "Installing scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set proxy.https.url="http://host.minikube.internal:3128" \
--set proxy.https.credentialSecretRef="proxy-auth" \
--set "proxy.noProxy[0]=10.96.0.1:443" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function run_squid() {
echo "Running squid"
docker run -d \
--name squid \
--publish 3128:3128 \
huangtingluo/squid-proxy:latest
}
function main() {
local failed=()
build_image
create_cluster
install_arc
run_squid
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@@ -0,0 +1,73 @@
#!/bin/bash
set -e
DIR="$(dirname "${BASH_SOURCE[0]}")"
DIR="$(realpath "${DIR}")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="default-$(date +'%M%S')$(((${RANDOM} + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_VERSION}" \
${ROOT_DIR}/charts/gha-runner-scale-set-controller \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
${ROOT_DIR}/charts/gha-runner-scale-set \
--version="${VERSION}" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@@ -0,0 +1,70 @@
#!/bin/bash
set -e
DIR="$(dirname "${BAASH_SOURCE[0]}")"
DIR="$(realpath "${DIR}")"
ROOT_DIR="$(realpath "${DIR}/../../")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="dind-$(date +'%M%S')$(((${RANDOM} + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-dind-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_VERSION}" \
${ROOT_DIR}/charts/gha-runner-scale-set-controller \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set containerMode.type="dind" \
${ROOT_DIR}/charts/gha-runner-scale-set \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${}"
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" arc_logs
delete_cluster
print_results "${failed[@]}"
}
main

View File

@@ -0,0 +1,3 @@
export TARGET_ORG="org"
export TARGET_REPO="repo"
export GITHUB_TOKEN="token"
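These three exports are placeholders. A sketch of how such a file could be used locally, assuming it is saved next to the tests (the .env path below is an assumption, not part of the change):
# Source the placeholder exports, then run the e2e driver (paths are assumptions):
source test/actions.github.com/.env
hack/e2e-test.sh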

View File

@@ -0,0 +1,165 @@
#!/bin/bash
DIR="$(dirname "${BASH_SOURCE[0]}")"
DIR="$(realpath "${DIR}")"
ROOT_DIR="$(realpath "${DIR}/../..")"
export TARGET_ORG="${TARGET_ORG:-actions-runner-controller}"
export TARGET_REPO="${TARGET_REPO:-arc_e2e_test_dummy}"
export IMAGE_NAME="${IMAGE_NAME:-arc-test-image}"
export VERSION="${VERSION:-$(yq .version < "${ROOT_DIR}/charts/gha-runner-scale-set-controller/Chart.yaml")}"
export IMAGE_VERSION="${IMAGE_VERSION:-${VERSION}}"
function build_image() {
echo "Building ARC image ${IMAGE_NAME}:${IMAGE_VERSION}"
cd ${ROOT_DIR}
export DOCKER_CLI_EXPERIMENTAL=enabled
export DOCKER_BUILDKIT=1
docker buildx build --platform ${PLATFORMS} \
--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
--build-arg VERSION=${VERSION} \
--build-arg COMMIT_SHA=${COMMIT_SHA} \
-t "${IMAGE_NAME}:${IMAGE_VERSION}" \
-f Dockerfile \
. --load
echo "Created image ${IMAGE_NAME}:${IMAGE_VERSION}"
cd -
}
function create_cluster() {
echo "Deleting minikube cluster if exists"
minikube delete || true
echo "Creating minikube cluster"
minikube start
echo "Loading image into minikube cluster"
minikube image load "${IMAGE_NAME}:${IMAGE_VERSION}"
}
function delete_cluster() {
echo "Deleting minikube cluster"
minikube delete
}
function log_arc() {
echo "ARC logs"
kubectl logs -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-rs-controller
}
function wait_for_arc() {
echo "Waiting for ARC to be ready"
local count=0;
while true; do
POD_NAME=$(kubectl get pods -n ${NAMESPACE} -l app.kubernetes.io/name=gha-rs-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
return 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-rs-controller
kubectl get pod -n "${NAMESPACE}"
kubectl describe deployment "${NAME}" -n "${NAMESPACE}"
}
function wait_for_scale_set() {
local count=0
while true; do
POD_NAME=$(kubectl get pods -n ${NAMESPACE} -l actions.github.com/scale-set-name=${NAME} -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: ${POD_NAME}"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=${NAME}"
return 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n ${NAMESPACE} -l actions.github.com/scale-set-name=${NAME}
kubectl get pod -n ${NAMESPACE}
}
function cleanup_scale_set() {
helm uninstall "${INSTALLATION_NAME}" --namespace "${NAMESPACE}" --debug
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n "${NAMESPACE}" -l app.kubernetes.io/instance="${INSTALLATION_NAME}"
}
function install_openebs() {
echo "Install openebs/dynamic-localpv-provisioner"
helm repo add openebs https://openebs.github.io/charts
helm repo update
helm install openebs openebs/openebs --namespace openebs --create-namespace
}
function print_results() {
local failed=("$@")
if [[ "${#failed[@]}" -ne 0 ]]; then
echo "----------------------------------"
echo "The following tests failed:"
for test in "${failed[@]}"; do
echo " - ${test}"
done
return 1
else
echo "----------------------------------"
echo "All tests passed!"
fi
}
function run_workflow() {
echo "Checking if the workflow file exists"
gh workflow view -R "${TARGET_ORG}/${TARGET_REPO}" "${WORKFLOW_FILE}" &> /dev/null || return 1
local queue_time="$(date -u +%FT%TZ)"
echo "Running workflow ${workflow_file}"
gh workflow run -R "${TARGET_ORG}/${TARGET_REPO}" "${WORKFLOW_FILE}" --ref main -f arc_name="${SCALE_SET_NAME}" || return 1
echo "Waiting for run to start"
local count=0
local run_id=
while true; do
if [[ "${count}" -ge 12 ]]; then
echo "Timeout waiting for run to start"
return 1
fi
run_id=$(gh run list -R "${TARGET_ORG}/${TARGET_REPO}" --workflow "${WORKFLOW_FILE}" --created ">${queue_time}" --json "name,databaseId" --jq ".[] | select(.name | contains(\"${SCALE_SET_NAME}\")) | .databaseId")
echo "Run ID: ${run_id}"
if [ -n "$run_id" ]; then
echo "Run found!"
break
fi
echo "Run not found yet, waiting 5 seconds"
sleep 5
count=$((count+1))
done
echo "Waiting for run to complete"
local code=0
gh run watch "${run_id}" -R "${TARGET_ORG}/${TARGET_REPO}" --exit-status &> /dev/null || code=$?
if [[ "${code}" -ne 0 ]]; then
echo "Run failed with exit code ${code}"
return 1
fi
echo "Run completed successfully"
}

View File

@@ -0,0 +1,74 @@
#!/bin/bash
set -e
DIR="$(dirname "${BASH_SOURCE[0]}")"
DIR="$(realpath "${DIR}")"
ROOT_DIR="$(ralpath "${DIR}/../../")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="kube-mode-$(date '%M%S')$((($RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-kubernetes-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_VERSION}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set containerMode.type="kubernetes" \
--set containerMode.kubernetesModeWorkVolumeClaim.accessModes={"ReadWriteOnce"} \
--set containerMode.kubernetesModeWorkVolumeClaim.storageClassName="openebs-hostpath" \
--set containerMode.kubernetesModeWorkVolumeClaim.resources.requests.storage="1Gi" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function main() {
local failed=()
build_image
create_cluster
install_openebs
install_arc
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" arc_logs
delete_cluster
print_results "${failed[@]}"
}
main

View File

@@ -0,0 +1,135 @@
#!/bin/bash
set -e
DIR="$(dirname "${BASH_SOURCE[0]}")"
DIR="$(realpath "${DIR}")"
ROOT_DIR="$(ralpath "${DIR}/../../")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="self-signed-crt-$(date '+%M%S')$((($RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_VERSION}" \
${ROOT_DIR}/charts/gha-runner-scale-set-controller \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Creating namespace ${SCALE_SET_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}" || true
echo "Installing ca-cert config map"
kubectl -n "${SCALE_SET_NAMESPACE}" create configmap ca-cert \
--from-file="${DIR}/mitmproxy/mitmproxy-ca-cert.pem"
echo "Config map:"
kubectl -n "${SCALE_SET_NAMESPACE}" get configmap ca-cert -o yaml
echo "Installing scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set proxy.https.url="http://host.minikube.internal:8080" \
--set "proxy.noProxy[0]=10.96.0.1:443" \
--set "githubServerTLS.certificateFrom.configMapKeyRef.name=ca-cert"
--set "githubServerTLS.certificateFrom.configMapKeyRef.key=mitmproxy-ca-cert.pem"
--set "githubServerTLS.runnerMountPath=/usr/local/share/ca-certificates/" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function wait_for_mitmproxy_cert() {
echo "Waiting for mitmproxy generated CA certificate"
local count=0
while true; do
if [ -f "./mitmproxy/mitmproxy-ca-cert.pem" ]; then
echo "CA certificate is generated"
echo "CA certificate:"
cat "./mitmproxy/mitmproxy-ca-cert.pem"
return 0
fi
if [ "${count}" -ge 60 ]; then
echo "Timeout waiting for mitmproxy generated CA certificate"
return 1
fi
sleep 1
count=$((count + 1))
done
}
function run_mitmproxy() {
echo "Running mitmproxy"
docker run -d \
--rm \
--name mitmproxy \
--publish 8080:8080 \
-v ./mitmproxy:/home/mitmproxy/.mitmproxy \
mitmproxy/mitmproxy:latest \
mitmdump
if ! wait_for_mitmproxy_cert; then
return 1
fi
echo "CA certificate is generated"
sudo cp ./mitmproxy/mitmproxy-ca-cert.pem /usr/local/share/ca-certificates/mitmproxy-ca-cert.crt
sudo chown runner ./mitmproxy/mitmproxy-ca-cert.pem
}
function main() {
if [[ ! -x "$(which mitmdump)" ]]; then
echo "mitmdump is not installed"
return 1
fi
local failed=()
build_image
create_cluster
install_arc
run_mitmproxy
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" arc_logs
delete_cluster
print_results "${failed[@]}"
}
main
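When debugging this test, the CA certificate written by mitmproxy can be inspected on the host before it is wrapped into the ca-cert config map; a sketch using openssl (informational only):
# Print the subject and validity window of the generated CA certificate:
openssl x509 -in ./mitmproxy/mitmproxy-ca-cert.pem -noout -subject -dates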

View File

@@ -0,0 +1,73 @@
#!/bin/bash
set -e
DIR="$(dirname "${BASH_SOURCE[0]}")"
DIR="$(realpath "${DIR}")"
ROOT_DIR="$(ralpath "${DIR}/../../")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="single-$(date '+%M%S')$((($RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_VERSION}" \
${ROOT_DIR}/charts/gha-runner-scale-set-controller \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
${ROOT_DIR}/charts/gha-runner-scale-set \
--version="${VERSION}" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" arc_logs
delete_cluster
print_results "${failed[@]}"
}
main

View File

@@ -0,0 +1,145 @@
#!/bin/bash
set -e
DIR="$(dirname "${BASH_SOURCE[0]}")"
DIR="$(realpath "${DIR}")"
ROOT_DIR="$(ralpath "${DIR}/../../")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="update-strategy-$(date '+%M%S')$((($RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-sleepy-matrix.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_VERSION}" \
--set flags.updateStrategy="eventual" \
${ROOT_DIR}/charts/gha-runner-scale-set-controller \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function upgrade_scale_set() {
echo "Upgrading scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
helm upgrade "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set template.spec.containers[0].name="runner" \
--set template.spec.containers[0].image="ghcr.io/actions/actions-runner:latest" \
--set template.spec.containers[0].command={"/home/runner/run.sh"} \
--set template.spec.containers[0].env[0].name="TEST" \
--set template.spec.containers[0].env[0].value="E2E TESTS" \
${ROOT_DIR}/charts/gha-runner-scale-set \
--version="${VERSION}" \
--debug
}
function assert_listener_deleted() {
local count=0
while true; do
LISTENER_COUNT="$(kubectl get pods -l actions.github.com/scale-set-name="${SCALE_SET_NAME}" -n "${ARC_NAMESPACE}" --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
RUNNERS_COUNT="$(kubectl get pods -l app.kubernetes.io/component=runner -n "${SCALE_SET_NAMESPACE}" --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
RESOURCES="$(kubectl get pods -A)"
if [ "${LISTENER_COUNT}" -eq 0 ]; then
echo "Listener has been deleted"
echo "${RESOURCES}"
return 0
fi
if [ "${count}" -ge 60 ]; then
echo "Timeout waiting for listener to be deleted"
echo "${RESOURCES}"
return 1
fi
echo "Waiting for listener to be deleted"
echo "Listener count: ${LISTENER_COUNT} target: 0 | Runners count: ${RUNNERS_COUNT} target: 3"
sleep 1
count=$((count+1))
done
}
function assert_listener_recreated() {
count=0
while true; do
LISTENER_COUNT="$(kubectl get pods -l actions.github.com/scale-set-name="${SCALE_SET_NAME}" -n "${ARC_NAMESPACE}" --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
RUNNERS_COUNT="$(kubectl get pods -l app.kubernetes.io/component=runner -n "${SCALE_SET_NAMESPACE}" --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
RESOURCES="$(kubectl get pods -A)"
if [ "${LISTENER_COUNT}" -eq 1 ]; then
echo "Listener is up!"
echo "${RESOURCES}"
return 0
fi
if [ "${count}" -ge 120 ]; then
echo "Timeout waiting for listener to be recreated"
echo "${RESOURCES}"
return 1
fi
echo "Waiting for listener to be recreated"
echo "Listener count: ${LISTENER_COUNT} target: 1 | Runners count: ${RUNNERS_COUNT} target: 0"
sleep 1
count=$((count+1))
done
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
upgrade_scale_set || failed+=("upgrade_scale_set")
assert_listener_deleted || failed+=("assert_listener_deleted")
assert_listener_recreated || failed+=("assert_listener_recreated")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" arc_logs
delete_cluster
print_results "${failed[@]}"
}
main

View File

@@ -37,7 +37,7 @@ var (
testResultCMNamePrefix = "test-result-"
RunnerVersion = "2.311.0"
RunnerContainerHooksVersion = "0.4.0"
RunnerContainerHooksVersion = "0.5.0"
)
// If you're willing to run this test via VS Code "run test" or "debug test",
@@ -459,7 +459,7 @@ func buildVars(repo, ubuntuVer string) vars {
runnerRootlessDindImage = testing.Img(runnerRootlessDindImageRepo, runnerImageTag)
dindSidecarImageRepo = "docker"
dindSidecarImageTag = "20.10.23-dind"
dindSidecarImageTag = "24.0.7-dind"
dindSidecarImage = testing.Img(dindSidecarImageRepo, dindSidecarImageTag)
)