mirror of https://github.com/actions/actions-runner-controller.git
synced 2025-12-26 03:27:31 +08:00

Compare commits: 6 commits (head 762c9ac698, author dependabot)
| Author | SHA1 | Date |
|---|---|---|
| | 85cbe444cc | |
| | bfe78ccd5d | |
| | 3fd1048576 | |
| | 180e0dabb2 | |
| | 50038fba61 | |
| | 82d5579696 | |
.github/workflows/gha-publish-chart.yaml (vendored, 2 changes)

@@ -75,7 +75,7 @@ jobs:
         uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435
+        uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f
         with:
           # Pinning v0.9.1 for Buildx and BuildKit v0.10.6
           # BuildKit v0.11 which has a bug causing intermittent
.github/workflows/gha-validate-chart.yaml (vendored, 2 changes)

@@ -67,7 +67,7 @@ jobs:
           ct lint --config charts/.ci/ct-config-gha.yaml

       - name: Set up docker buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435
+        uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f
         if: steps.list-changed.outputs.changed == 'true'
         with:
           version: latest
.github/workflows/global-publish-canary.yaml (vendored, 2 changes)

@@ -113,7 +113,7 @@ jobs:
         uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435
+        uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f
         with:
           version: latest

.github/workflows/go.yaml (vendored, 2 changes)

@@ -48,7 +48,7 @@ jobs:
           go-version-file: "go.mod"
           cache: false
       - name: golangci-lint
-        uses: golangci/golangci-lint-action@e7fa5ac41e1cf5b7d48e45e42232ce7ada589601
+        uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20
         with:
          only-new-issues: true
          version: v2.5.0
@@ -211,7 +211,14 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
 	// TODO: make sure the role binding has the up-to-date role and service account

 	listenerPod := new(corev1.Pod)
-	if err := r.Get(ctx, client.ObjectKey{Namespace: autoscalingListener.Namespace, Name: autoscalingListener.Name}, listenerPod); err != nil {
+	if err := r.Get(
+		ctx,
+		client.ObjectKey{
+			Namespace: autoscalingListener.Namespace,
+			Name:      autoscalingListener.Name,
+		},
+		listenerPod,
+	); err != nil {
 		if !kerrors.IsNotFound(err) {
 			log.Error(err, "Unable to get listener pod", "namespace", autoscalingListener.Namespace, "name", autoscalingListener.Name)
 			return ctrl.Result{}, err
@@ -229,37 +236,30 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.

 	cs := listenerContainerStatus(listenerPod)
 	switch {
+	case listenerPod.Status.Reason == "Evicted":
+		log.Info(
+			"Listener pod is evicted",
+			"phase", listenerPod.Status.Phase,
+			"reason", listenerPod.Status.Reason,
+			"message", listenerPod.Status.Message,
+		)
+
+		return ctrl.Result{}, r.deleteListenerPod(ctx, autoscalingListener, listenerPod, log)
+
 	case cs == nil:
 		log.Info("Listener pod is not ready", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
 		return ctrl.Result{}, nil
 	case cs.State.Terminated != nil:
-		log.Info("Listener pod is terminated", "namespace", listenerPod.Namespace, "name", listenerPod.Name, "reason", cs.State.Terminated.Reason, "message", cs.State.Terminated.Message)
-
-		if err := r.publishRunningListener(autoscalingListener, false); err != nil {
-			log.Error(err, "Unable to publish runner listener down metric", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
-		}
-
-		if listenerPod.DeletionTimestamp.IsZero() {
-			log.Info("Deleting the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
-			if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
-				log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
-				return ctrl.Result{}, err
-			}
-
-			// delete the listener config secret as well, so it gets recreated when the listener pod is recreated, with any new data if it exists
-			var configSecret corev1.Secret
-			err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &configSecret)
-			switch {
-			case err == nil && configSecret.DeletionTimestamp.IsZero():
-				log.Info("Deleting the listener config secret")
-				if err := r.Delete(ctx, &configSecret); err != nil {
-					return ctrl.Result{}, fmt.Errorf("failed to delete listener config secret: %w", err)
-				}
-			case !kerrors.IsNotFound(err):
-				return ctrl.Result{}, fmt.Errorf("failed to get the listener config secret: %w", err)
-			}
-		}
-		return ctrl.Result{}, nil
+		log.Info(
+			"Listener pod is terminated",
+			"namespace", listenerPod.Namespace,
+			"name", listenerPod.Name,
+			"reason", cs.State.Terminated.Reason,
+			"message", cs.State.Terminated.Message,
+		)
+
+		return ctrl.Result{}, r.deleteListenerPod(ctx, autoscalingListener, listenerPod, log)
+
 	case cs.State.Running != nil:
 		if err := r.publishRunningListener(autoscalingListener, true); err != nil {
 			log.Error(err, "Unable to publish running listener", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
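The switch above pivots on `listenerContainerStatus(listenerPod)`, a helper this diff calls but never shows. A minimal sketch of what such a lookup presumably does, assuming the listener container is found by name in `pod.Status.ContainerStatuses` (the container name constant here is illustrative, not the controller's actual identifier):

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
)

// hypothetical constant for illustration; the real name lives in the
// controller package
const listenerContainerName = "listener"

// listenerContainerStatus returns the listener container's status, or nil
// while the kubelet has not reported one yet (which the switch above
// treats as "pod is not ready").
func listenerContainerStatus(pod *corev1.Pod) *corev1.ContainerStatus {
	for i := range pod.Status.ContainerStatuses {
		cs := &pod.Status.ContainerStatuses[i]
		if cs.Name == listenerContainerName {
			return cs
		}
	}
	return nil
}
```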
@@ -269,10 +269,39 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
 			return ctrl.Result{}, nil
 		}
 		return ctrl.Result{}, nil
 	}
 	return ctrl.Result{}, nil
 }

+func (r *AutoscalingListenerReconciler) deleteListenerPod(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerPod *corev1.Pod, log logr.Logger) error {
+	if err := r.publishRunningListener(autoscalingListener, false); err != nil {
+		log.Error(err, "Unable to publish runner listener down metric", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
+	}
+
+	if listenerPod.DeletionTimestamp.IsZero() {
+		log.Info("Deleting the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
+		if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
+			log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
+			return err
+		}
+
+		// delete the listener config secret as well, so it gets recreated when the listener pod is recreated, with any new data if it exists
+		var configSecret corev1.Secret
+		err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &configSecret)
+		switch {
+		case err == nil && configSecret.DeletionTimestamp.IsZero():
+			log.Info("Deleting the listener config secret")
+			if err := r.Delete(ctx, &configSecret); err != nil {
+				return fmt.Errorf("failed to delete listener config secret: %w", err)
+			}
+		case !kerrors.IsNotFound(err):
+			return fmt.Errorf("failed to get the listener config secret: %w", err)
+		}
+	}
+
+	return nil
+}
+
 func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (requeue bool, err error) {
 	logger.Info("Cleaning up the listener pod")
 	listenerPod := new(corev1.Pod)
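Deleting the config secret together with the pod leans on a standard controller idiom: on the next reconcile, an absent secret is simply rebuilt from current data, which is exactly why the comment says it "gets recreated ... with any new data if it exists". A hedged sketch of that recreate-if-missing pattern, with illustrative helper names that are not from the repo:

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// ensureConfigSecret sketches the idiom deleteListenerPod relies on:
// deleting the secret is safe because a later reconcile recreates it.
// buildSecret stands in for whatever the controller uses to render the
// listener configuration.
func ensureConfigSecret(ctx context.Context, c client.Client, key client.ObjectKey, buildSecret func() *corev1.Secret) error {
	var secret corev1.Secret
	err := c.Get(ctx, key, &secret)
	switch {
	case kerrors.IsNotFound(err):
		// absent (e.g. deleted alongside the listener pod): create fresh
		return c.Create(ctx, buildSecret())
	case err != nil:
		return err
	}
	return nil // already present, nothing to do
}
```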
@@ -671,6 +671,55 @@ var _ = Describe("Test AutoScalingListener customization", func() {
 			autoscalingListenerTestInterval,
 		).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be created")
 	})
+
+	It("Should re-create pod when the listener pod is evicted", func() {
+		pod := new(corev1.Pod)
+		Eventually(
+			func() (string, error) {
+				err := k8sClient.Get(
+					ctx,
+					client.ObjectKey{
+						Name:      autoscalingListener.Name,
+						Namespace: autoscalingListener.Namespace,
+					},
+					pod,
+				)
+				if err != nil {
+					return "", err
+				}
+
+				return pod.Name, nil
+			},
+			autoscalingListenerTestTimeout,
+			autoscalingListenerTestInterval,
+		).Should(
+			BeEquivalentTo(autoscalingListener.Name),
+			"Pod should be created",
+		)
+
+		updated := pod.DeepCopy()
+		oldPodUID := string(pod.UID)
+		updated.Status.Reason = "Evicted"
+		err := k8sClient.Status().Update(ctx, updated)
+		Expect(err).NotTo(HaveOccurred(), "failed to update pod status")
+
+		pod = new(corev1.Pod)
+		Eventually(
+			func() (string, error) {
+				err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
+				if err != nil {
+					return "", err
+				}
+
+				return string(pod.UID), nil
+			},
+			autoscalingListenerTestTimeout,
+			autoscalingListenerTestInterval,
+		).ShouldNot(
+			BeEquivalentTo(oldPodUID),
+			"Pod should be created",
+		)
+	})
 })
 })

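The eviction test works because suites like this one typically run against controller-runtime's envtest, where no kubelet exists: the test plays kubelet itself by writing the pod's status subresource, then polls until the controller replaces the pod. The essential move, extracted as a sketch using the same status-update call as the test above:

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// markEvicted simulates kubelet eviction for envtest-based suites. Note a
// real eviction also sets Status.Phase to Failed; the listener controller's
// case above keys only on Status.Reason, so setting that field suffices.
func markEvicted(ctx context.Context, c client.Client, pod *corev1.Pod) error {
	updated := pod.DeepCopy()
	updated.Status.Reason = "Evicted"
	// Status().Update writes only the /status subresource; the pod spec
	// is left untouched, exactly as the kubelet would leave it.
	return c.Status().Update(ctx, updated)
}
```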
@@ -312,26 +312,46 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ

 	cs := runnerContainerStatus(pod)
 	switch {
-	case cs == nil:
-		// starting, no container state yet
-		log.Info("Waiting for runner container status to be available")
-		return ctrl.Result{}, nil
-
-	case cs.State.Terminated == nil: // still running or evicted
-		if pod.Status.Phase == corev1.PodFailed && pod.Status.Reason == "Evicted" {
-			log.Info("Pod set the termination phase, but container state is not terminated. Deleting pod",
-				"PodPhase", pod.Status.Phase,
-				"PodReason", pod.Status.Reason,
-				"PodMessage", pod.Status.Message,
-			)
+	case pod.Status.Phase == corev1.PodFailed: // All containers are stopped
+		log.Info("Pod is in failed phase, inspecting runner container status",
+			"podReason", pod.Status.Reason,
+			"podMessage", pod.Status.Message,
+			"podConditions", pod.Status.Conditions,
+		)
+		// If the runner pod did not have chance to start, terminated state may not be set.
+		// Therefore, we should try to restart it.
+		if cs == nil || cs.State.Terminated == nil {
+			log.Info("Runner container does not have state set, deleting pod as failed so it can be restarted")
+			return ctrl.Result{}, r.deleteEphemeralRunnerOrPod(ctx, ephemeralRunner, pod, log)
+		}
+
-			if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
-				log.Error(err, "failed to delete pod as failed on pod.Status.Phase: Failed")
+		if cs.State.Terminated.ExitCode == 0 {
+			log.Info("Runner container has succeeded but pod is in failed phase; Assume successful exit")
+			// If the pod is in a failed state, that means that at least one container exited with non-zero exit code.
+			// If the runner container exits with 0, we assume that the runner has finished successfully.
+			// If side-car container exits with non-zero, it shouldn't affect the runner. Runner exit code
+			// drives the controller's inference of whether the job has succeeded or failed.
+			if err := r.Delete(ctx, ephemeralRunner); err != nil {
+				log.Error(err, "Failed to delete ephemeral runner after successful completion")
 				return ctrl.Result{}, err
 			}
 			return ctrl.Result{}, nil
 		}

-		log.Info("Ephemeral runner container is still running")
+		log.Error(
+			errors.New("ephemeral runner container has failed, with runner container exit code non-zero"),
+			"Ephemeral runner container has failed, and runner container termination exit code is non-zero",
+			"containerTerminatedState", cs.State.Terminated,
+		)
+		return ctrl.Result{}, r.deleteEphemeralRunnerOrPod(ctx, ephemeralRunner, pod, log)
+
+	case cs == nil:
+		// starting, no container state yet
+		log.Info("Waiting for runner container status to be available")
+		return ctrl.Result{}, nil
+
+	case cs.State.Terminated == nil: // container is not terminated and pod phase is not failed, so runner is still running
+		log.Info("Runner container is still running; updating ephemeral runner status")
 		if err := r.updateRunStatusFromPod(ctx, ephemeralRunner, pod, log); err != nil {
 			log.Info("Failed to update ephemeral runner status. Requeue to not miss this event")
 			return ctrl.Result{}, err
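The reworked `PodFailed` branch encodes a three-way inference: no recorded container state means the runner never got to run and the pod should be recreated; a zero exit code means the runner finished even though a sidecar failed the pod; anything else is a genuine failure. A standalone restatement of that decision table, distilled from the case above rather than copied from the controller:

```go
package main

import corev1 "k8s.io/api/core/v1"

type outcome int

const (
	restartPod       outcome = iota // no runner state recorded: recreate the pod
	treatAsSucceeded                // runner exited 0: a sidecar failed the pod, not the job
	treatAsFailed                   // runner exited non-zero: genuine failure
)

// classifyFailedPod mirrors the inference the PodFailed case performs on
// the runner container's status (cs is nil when not yet reported).
func classifyFailedPod(cs *corev1.ContainerStatus) outcome {
	switch {
	case cs == nil || cs.State.Terminated == nil:
		return restartPod
	case cs.State.Terminated.ExitCode == 0:
		return treatAsSucceeded
	default:
		return treatAsFailed
	}
}
```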
@@ -340,36 +360,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ

 	case cs.State.Terminated.ExitCode != 0: // failed
 		log.Info("Ephemeral runner container failed", "exitCode", cs.State.Terminated.ExitCode)
-		if ephemeralRunner.HasJob() {
-			log.Error(
-				errors.New("ephemeral runner has a job assigned, but the pod has failed"),
-				"Ephemeral runner either has faulty entrypoint or something external killing the runner",
-			)
-			log.Info("Deleting the ephemeral runner that has a job assigned but the pod has failed")
-			if err := r.Delete(ctx, ephemeralRunner); err != nil {
-				log.Error(err, "Failed to delete the ephemeral runner that has a job assigned but the pod has failed")
-				return ctrl.Result{}, err
-			}
-
-			log.Info("Deleted the ephemeral runner that has a job assigned but the pod has failed")
-			log.Info("Trying to remove the runner from the service")
-			actionsClient, err := r.GetActionsService(ctx, ephemeralRunner)
-			if err != nil {
-				log.Error(err, "Failed to get actions client for removing the runner from the service")
-				return ctrl.Result{}, nil
-			}
-			if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
-				log.Error(err, "Failed to remove the runner from the service")
-				return ctrl.Result{}, nil
-			}
-			log.Info("Removed the runner from the service")
-			return ctrl.Result{}, nil
-		}
-		if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
-			log.Error(err, "Failed to delete runner pod on failure")
-			return ctrl.Result{}, err
-		}
-		return ctrl.Result{}, nil
+		return ctrl.Result{}, r.deleteEphemeralRunnerOrPod(ctx, ephemeralRunner, pod, log)

 	default: // succeeded
 		log.Info("Ephemeral runner has finished successfully, deleting ephemeral runner", "exitCode", cs.State.Terminated.ExitCode)
@@ -381,6 +372,41 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 	}
 }

+func (r *EphemeralRunnerReconciler) deleteEphemeralRunnerOrPod(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
+	if ephemeralRunner.HasJob() {
+		log.Error(
+			errors.New("ephemeral runner has a job assigned, but the pod has failed"),
+			"Ephemeral runner either has faulty entrypoint or something external killing the runner",
+		)
+		log.Info("Deleting the ephemeral runner that has a job assigned but the pod has failed")
+		if err := r.Delete(ctx, ephemeralRunner); err != nil {
+			log.Error(err, "Failed to delete the ephemeral runner that has a job assigned but the pod has failed")
+			return err
+		}
+
+		log.Info("Deleted the ephemeral runner that has a job assigned but the pod has failed")
+		log.Info("Trying to remove the runner from the service")
+		actionsClient, err := r.GetActionsService(ctx, ephemeralRunner)
+		if err != nil {
+			log.Error(err, "Failed to get actions client for removing the runner from the service")
+			return nil
+		}
+		if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
+			log.Error(err, "Failed to remove the runner from the service")
+			return nil
+		}
+		log.Info("Removed the runner from the service")
+		return nil
+	}
+
+	if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
+		log.Error(err, "Failed to delete runner pod on failure")
+		return err
+	}
+
+	return nil
+}
+
 func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ok bool, err error) {
 	if err := r.deleteRunnerFromService(ctx, ephemeralRunner, log); err != nil {
 		actionsError := &actions.ActionsError{}
@@ -546,6 +572,8 @@ func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralR

 // deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count.
 // It should not be responsible for setting the status to Failed.
+//
+// It should be called by deleteEphemeralRunnerOrPod which is responsible for deciding whether to delete the EphemeralRunner or just the Pod.
 func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
 	if pod.DeletionTimestamp.IsZero() {
 		log.Info("Deleting the ephemeral runner pod", "podId", pod.UID)
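The expanded comment pins down the division of labour: `deleteEphemeralRunnerOrPod` decides what to delete, while `deletePodAsFailed` deletes the pod and records the failure in `.Status.Failures` (the test hunks below assert on `len(updated.Status.Failures)`). A minimal sketch of that bookkeeping, assuming Failures is a map keyed by pod UID; the field's actual type is not shown in this diff:

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
)

// assumed shape for illustration; the real field lives on
// v1alpha1.EphemeralRunnerStatus
type ephemeralRunnerStatus struct {
	Failures map[string]bool
}

// recordFailure notes the failed pod's UID before the pod is deleted, so
// the failure count survives pod recreation and can gate a retry limit.
func recordFailure(status *ephemeralRunnerStatus, pod *corev1.Pod) {
	if status.Failures == nil {
		status.Failures = make(map[string]bool)
	}
	status.Failures[string(pod.UID)] = true
}
```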
@@ -261,6 +261,238 @@ var _ = Describe("EphemeralRunner", func() {
 			).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
 		})

+		It("It should delete ephemeral runner when pod failed before runner state is recorded and job assigned", func() {
+			er := new(v1alpha1.EphemeralRunner)
+			Eventually(func() error {
+				return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
+			}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(Succeed(), "failed to get ephemeral runner")
+
+			er.Status.JobID = "1"
+			err := k8sClient.Status().Update(ctx, er)
+			Expect(err).To(BeNil(), "failed to update ephemeral runner status")
+
+			Eventually(func() (string, error) {
+				current := new(v1alpha1.EphemeralRunner)
+				if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, current); err != nil {
+					return "", err
+				}
+				return current.Status.JobID, nil
+			}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo("1"))
+
+			pod := new(corev1.Pod)
+			Eventually(func() (bool, error) {
+				if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
+					return false, err
+				}
+				return true, nil
+			}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
+
+			pod.Status.Phase = corev1.PodFailed
+			pod.Status.ContainerStatuses = nil
+			err = k8sClient.Status().Update(ctx, pod)
+			Expect(err).To(BeNil(), "Failed to update pod status")
+
+			Eventually(func() bool {
+				check := new(v1alpha1.EphemeralRunner)
+				err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, check)
+				return kerrors.IsNotFound(err)
+			}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
+		})
+
+		It("It should delete ephemeral runner when pod failed before runner state is recorded and job not assigned", func() {
+			pod := new(corev1.Pod)
+			Eventually(func() (bool, error) {
+				if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
+					return false, err
+				}
+				return true, nil
+			}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
+
+			oldPodUID := pod.UID
+
+			pod.Status.Phase = corev1.PodFailed
+			pod.Status.ContainerStatuses = nil
+			err := k8sClient.Status().Update(ctx, pod)
+			Expect(err).To(BeNil(), "Failed to update pod status")
+
+			Eventually(
+				func() (int, error) {
+					updated := new(v1alpha1.EphemeralRunner)
+					err := k8sClient.Get(
+						ctx,
+						client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace},
+						updated,
+					)
+					if err != nil {
+						return 0, err
+					}
+					return len(updated.Status.Failures), nil
+				},
+				ephemeralRunnerTimeout,
+				ephemeralRunnerInterval,
+			).Should(BeEquivalentTo(1))
+
+			Eventually(
+				func() (bool, error) {
+					newPod := new(corev1.Pod)
+					err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, newPod)
+					if err != nil {
+						return false, err
+					}
+					return newPod.UID != oldPodUID, nil
+				},
+				ephemeralRunnerTimeout,
+				ephemeralRunnerInterval,
+			).Should(BeTrue(), "Pod should be re-created")
+		})
+
+		It("It should treat pod failed with runner container exit 0 as success with job id", func() {
+			er := new(v1alpha1.EphemeralRunner)
+			Eventually(func() error {
+				return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
+			}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(Succeed(), "failed to get ephemeral runner")
+
+			er.Status.JobID = "1"
+			err := k8sClient.Status().Update(ctx, er)
+			Expect(err).To(BeNil(), "failed to update ephemeral runner status")
+
+			pod := new(corev1.Pod)
+			Eventually(
+				func() error {
+					if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
+						return err
+					}
+					return nil
+				},
+				ephemeralRunnerTimeout,
+				ephemeralRunnerInterval,
+			).Should(Succeed(), "failed to get pod")
+
+			pod.Status.Phase = corev1.PodFailed
+			pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
+				Name: v1alpha1.EphemeralRunnerContainerName,
+				State: corev1.ContainerState{
+					Terminated: &corev1.ContainerStateTerminated{
+						ExitCode: 0,
+					},
+				},
+			})
+			err = k8sClient.Status().Update(ctx, pod)
+			Expect(err).To(BeNil(), "Failed to update pod status")
+
+			Eventually(
+				func() bool {
+					check := new(v1alpha1.EphemeralRunner)
+					err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, check)
+					return kerrors.IsNotFound(err)
+				},
+				ephemeralRunnerTimeout,
+				ephemeralRunnerInterval,
+			).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
+		})
+
+		It("It should treat pod failed with runner container exit 0 as success with no job id", func() {
+			pod := new(corev1.Pod)
+			Eventually(
+				func() error {
+					if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
+						return err
+					}
+					return nil
+				},
+				ephemeralRunnerTimeout,
+				ephemeralRunnerInterval,
+			).Should(Succeed(), "failed to get pod")
+
+			pod.Status.Phase = corev1.PodFailed
+			pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
+				Name: v1alpha1.EphemeralRunnerContainerName,
+				State: corev1.ContainerState{
+					Terminated: &corev1.ContainerStateTerminated{
+						ExitCode: 0,
+					},
+				},
+			})
+			err := k8sClient.Status().Update(ctx, pod)
+			Expect(err).To(BeNil(), "Failed to update pod status")
+
+			Eventually(
+				func() bool {
+					check := new(v1alpha1.EphemeralRunner)
+					err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, check)
+					return kerrors.IsNotFound(err)
+				},
+				ephemeralRunnerTimeout,
+				ephemeralRunnerInterval,
+			).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
+		})
+
+		It("It should mark as failed when job is not assigned and pod is failed", func() {
+			er := new(v1alpha1.EphemeralRunner)
+			Eventually(func() error {
+				return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
+			},
+				ephemeralRunnerTimeout,
+				ephemeralRunnerInterval,
+			).Should(Succeed(), "failed to get ephemeral runner")
+
+			pod := new(corev1.Pod)
+			Eventually(
+				func() error {
+					if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
+						return err
+					}
+					return nil
+				},
+				ephemeralRunnerTimeout,
+				ephemeralRunnerInterval,
+			).Should(Succeed(), "failed to get pod")
+
+			pod.Status.Phase = corev1.PodFailed
+			oldPodUID := pod.UID
+			pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
+				Name: v1alpha1.EphemeralRunnerContainerName,
+				State: corev1.ContainerState{
+					Terminated: &corev1.ContainerStateTerminated{
+						ExitCode: 1,
+					},
+				},
+			})
+
+			err := k8sClient.Status().Update(ctx, pod)
+			Expect(err).To(BeNil(), "Failed to update pod status")
+
+			Eventually(
+				func() (int, error) {
+					updated := new(v1alpha1.EphemeralRunner)
+					err := k8sClient.Get(
+						ctx,
+						client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace},
+						updated,
+					)
+					if err != nil {
+						return 0, err
+					}
+					return len(updated.Status.Failures), nil
+				},
+				ephemeralRunnerTimeout,
+				ephemeralRunnerInterval,
+			).Should(BeEquivalentTo(1))
+
+			Eventually(
+				func() (bool, error) {
+					newPod := new(corev1.Pod)
+					err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, newPod)
+					if err != nil {
+						return false, err
+					}
+					return newPod.UID != oldPodUID, nil
+				},
+				ephemeralRunnerTimeout,
+				ephemeralRunnerInterval,
+			).Should(BeTrue(), "Pod should be re-created")
+		})
+
 		It("It should failed if a pod template is invalid", func() {
 			invalideEphemeralRunner := newExampleRunner("invalid-ephemeral-runner", autoscalingNS.Name, configSecret.Name)
 			invalideEphemeralRunner.Spec.Spec.PriorityClassName = "notexist"
@@ -745,6 +977,52 @@ var _ = Describe("EphemeralRunner", func() {
 			).Should(BeEquivalentTo(true))
 		})

+		It("It should re-create pod on reason starting with OutOf", func() {
+			pod := new(corev1.Pod)
+			Eventually(
+				func() (bool, error) {
+					err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
+					if err != nil {
+						return false, err
+					}
+					return true, nil
+				},
+				ephemeralRunnerTimeout,
+				ephemeralRunnerInterval,
+			).Should(BeEquivalentTo(true))
+
+			pod.Status.Phase = corev1.PodFailed
+			pod.Status.Reason = "OutOfpods"
+			pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
+				Name:  v1alpha1.EphemeralRunnerContainerName,
+				State: corev1.ContainerState{},
+			})
+			err := k8sClient.Status().Update(ctx, pod)
+			Expect(err).To(BeNil(), "failed to patch pod status")
+
+			updated := new(v1alpha1.EphemeralRunner)
+			Eventually(func() (bool, error) {
+				err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
+				if err != nil {
+					return false, err
+				}
+				return len(updated.Status.Failures) == 1, nil
+			}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
+
+			// should re-create after failure
+			Eventually(
+				func() (bool, error) {
+					pod := new(corev1.Pod)
+					if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
+						return false, err
+					}
+					return true, nil
+				},
+				ephemeralRunnerTimeout,
+				ephemeralRunnerInterval,
+			).Should(BeEquivalentTo(true))
+		})
+
 		It("It should not set the phase to succeeded without pod termination status", func() {
 			pod := new(corev1.Pod)
 			Eventually(
go.mod (28 changes)

@@ -20,21 +20,21 @@ require (
 	github.com/hashicorp/go-retryablehttp v0.7.8
 	github.com/kelseyhightower/envconfig v1.4.0
 	github.com/onsi/ginkgo v1.16.5
-	github.com/onsi/ginkgo/v2 v2.27.2
-	github.com/onsi/gomega v1.38.2
+	github.com/onsi/ginkgo/v2 v2.27.3
+	github.com/onsi/gomega v1.38.3
 	github.com/prometheus/client_golang v1.23.2
 	github.com/stretchr/testify v1.11.1
 	github.com/teambition/rrule-go v1.8.2
 	go.uber.org/multierr v1.11.0
 	go.uber.org/zap v1.27.1
-	golang.org/x/net v0.47.0
-	golang.org/x/oauth2 v0.33.0
-	golang.org/x/sync v0.18.0
+	golang.org/x/net v0.48.0
+	golang.org/x/oauth2 v0.34.0
+	golang.org/x/sync v0.19.0
 	gomodules.xyz/jsonpatch/v2 v2.5.0
 	gopkg.in/yaml.v2 v2.4.0
-	k8s.io/api v0.34.2
-	k8s.io/apimachinery v0.34.2
-	k8s.io/client-go v0.34.2
+	k8s.io/api v0.34.3
+	k8s.io/apimachinery v0.34.3
+	k8s.io/client-go v0.34.3
 	k8s.io/utils v0.0.0-20251002143259-bc988d571ff4
 	sigs.k8s.io/controller-runtime v0.22.4
 	sigs.k8s.io/yaml v1.6.0
@@ -165,14 +165,14 @@ require (
 	github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect
 	go.yaml.in/yaml/v2 v2.4.3 // indirect
 	go.yaml.in/yaml/v3 v3.0.4 // indirect
-	golang.org/x/crypto v0.45.0 // indirect
+	golang.org/x/crypto v0.46.0 // indirect
 	golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 // indirect
-	golang.org/x/mod v0.29.0 // indirect
-	golang.org/x/sys v0.38.0 // indirect
-	golang.org/x/term v0.37.0 // indirect
-	golang.org/x/text v0.31.0 // indirect
+	golang.org/x/mod v0.30.0 // indirect
+	golang.org/x/sys v0.39.0 // indirect
+	golang.org/x/term v0.38.0 // indirect
+	golang.org/x/text v0.32.0 // indirect
 	golang.org/x/time v0.13.0 // indirect
-	golang.org/x/tools v0.38.0 // indirect
+	golang.org/x/tools v0.39.0 // indirect
 	google.golang.org/protobuf v1.36.10 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
go.sum (56 changes)

@@ -316,12 +316,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
-github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
+github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8=
+github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
-github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
+github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM=
+github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4=
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -391,30 +391,30 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
-golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
+golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
+golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
 golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 h1:TQwNpfvNkxAVlItJf6Cr5JTsVZoC/Sj7K3OZv2Pc14A=
 golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
-golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
+golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
+golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
-golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
-golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
-golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
+golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
+golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
+golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
+golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
-golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
+golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -426,14 +426,14 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
-golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
-golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
+golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
+golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
+golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
-golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
+golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
+golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
 golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
 golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -441,8 +441,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
-golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
+golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
+golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -475,14 +475,14 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY=
-k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw=
+k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4=
+k8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk=
 k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI=
 k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc=
-k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4=
-k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
-k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M=
-k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE=
+k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE=
+k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
+k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A=
+k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=