Compare commits

..

3 Commits

Author SHA1 Message Date
Sebastien Le Digabel
b1bfa8787f Optional override of runner image in chart (#666)
* Optional override of runner image in chart

This commit adds the option to override the actions runner image. This
allows running the controller in environments where access to Dockerhub
is restricted.

It uses the parameter [--runner-image](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/main.go#L89) from the controller.
The default value is set as a constant
[here](acb906164b/main.go (L40)).

The default value for the chart is the same.

* Fixing actionsRunner name

... to actionsRunnerRepositoryAndTag for consistency.

* Bumping chart to v0.12.5
2021-06-30 09:53:45 +09:00
Yusuke Kuoka
c78116b0f9 e2e: Cover RunnerDeployment (#668)
Previously the E2E test suite covered only RunnerSet. This refactors the existing E2E test code to extract the common test structure into a `env` struct and its methods, and use it to write two very similar tests, one for RunnerSet and another for RunnerDeployment.
2021-06-29 17:52:43 +09:00
toast-gear
4ec57d3e39 chore: update helm create secret defaults to false (#669)
There's no reason to create a non-working secret by default. If someone wants to deploy the secrets via the chart they will need to do some config regardless so they might as well also set the create flag
2021-06-29 17:51:41 +09:00
14 changed files with 802 additions and 481 deletions

View File

@@ -211,10 +211,11 @@ acceptance/deploy:
acceptance/tests:
acceptance/checks.sh
# We use -count=1 instead of `go clean -testcache`
# See https://terratest.gruntwork.io/docs/testing-best-practices/avoid-test-caching/
.PHONY: e2e
e2e:
go clean -testcache
go test -v -timeout 600s -run '^TestE2E$$' ./test/e2e
go test -count=1 -v -timeout 600s -run '^TestE2E$$' ./test/e2e
# Upload release file to GitHub.
github-release: release

View File

@@ -15,7 +15,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.12.4
version: 0.12.5
# Used as the default manager tag value when no tag property is provided in the values.yaml
appVersion: 0.19.0

View File

@@ -16,7 +16,7 @@ _Default values are the defaults set in the charts values.yaml, some properties
| `githubAPICacheDuration` | Set the cache period for API calls | |
| `githubEnterpriseServerURL` | Set the URL for a self-hosted GitHub Enterprise Server | |
| `logLevel` | Set the log level of the controller container | |
| `authSecret.create` | Deploy the controller auth secret | true |
| `authSecret.create` | Deploy the controller auth secret | false |
| `authSecret.name` | Set the name of the auth secret | controller-manager |
| `authSecret.github_app_id` | The ID of your GitHub App. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_app_installation_id` | The ID of your GitHub App installation. **This can't be set at the same time as `authSecret.github_token`** | |
@@ -24,6 +24,7 @@ _Default values are the defaults set in the charts values.yaml, some properties
| `authSecret.github_token` | Your chosen GitHub PAT token. **This can't be set at the same time as the `authSecret.github_app_*`** | |
| `image.repository` | The "repository/image" of the controller container | summerwind/actions-runner-controller |
| `image.tag` | The tag of the controller container | |
| `image.actionsRunnerRepositoryAndTag` | The "repository/image" of the actions runner container | summerwind/actions-runner:latest |
| `image.dindSidecarRepositoryAndTag` | The "repository/image" of the dind sidecar container | docker:dind |
| `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
| `metrics.serviceMonitor` | Deploy serviceMonitor kind for use with prometheus-operator CRDs | false |
@@ -56,7 +57,7 @@ _Default values are the defaults set in the charts values.yaml, some properties
| `githubWebhookServer.replicaCount` | Set the number of webhook server pods | 1 |
| `githubWebhookServer.syncPeriod` | Set the period in which the controller reconciles the resources | 10m |
| `githubWebhookServer.enabled` | Deploy the webhook server pod | false |
| `githubWebhookServer.secret.create` | Deploy the webhook hook secret | true |
| `githubWebhookServer.secret.create` | Deploy the webhook hook secret | false |
| `githubWebhookServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
| `githubWebhookServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
| `githubWebhookServer.imagePullSecrets` | Specifies the secret to be used when pulling the githubWebhookServer pod containers | |

View File

@@ -40,6 +40,7 @@ spec:
- "--enable-leader-election"
- "--sync-period={{ .Values.syncPeriod }}"
- "--docker-image={{ .Values.image.dindSidecarRepositoryAndTag }}"
- "--runner-image={{ .Values.image.actionsRunnerRepositoryAndTag }}"
{{- if .Values.scope.singleNamespace }}
- "--watch-namespace={{ default .Release.Namespace .Values.scope.watchNamespace }}"
{{- end }}

View File

@@ -19,7 +19,7 @@ syncPeriod: 10m
# Only 1 authentication method can be deployed at a time
# Uncomment the configuration you are applying and fill in the details
authSecret:
create: true
create: false
name: "controller-manager"
### GitHub Apps Configuration
#github_app_id: ""
@@ -29,7 +29,8 @@ authSecret:
#github_token: ""
image:
repository: summerwind/actions-runner-controller
repository: "summerwind/actions-runner-controller"
actionsRunnerRepositoryAndTag: "summerwind/actions-runner:latest"
dindSidecarRepositoryAndTag: "docker:dind"
pullPolicy: IfNotPresent
@@ -118,10 +119,10 @@ githubWebhookServer:
replicaCount: 1
syncPeriod: 10m
secret:
create: true
create: false
name: "github-webhook-server"
### GitHub Webhook Configuration
#github_webhook_secret_token: ""
github_webhook_secret_token: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

View File

@@ -3,8 +3,6 @@ package e2e
import (
"context"
"fmt"
"math/rand"
"os"
"path/filepath"
"time"
@@ -14,19 +12,12 @@ import (
)
var (
Img = func(repo, tag string) testing.ContainerImage {
return testing.ContainerImage{
Repo: repo,
Tag: tag,
}
}
controllerImageRepo = "actionsrunnercontrollere2e/actions-runner-controller"
controllerImageTag = "e2e"
controllerImage = Img(controllerImageRepo, controllerImageTag)
controllerImage = testing.Img(controllerImageRepo, controllerImageTag)
runnerImageRepo = "actionsrunnercontrollere2e/actions-runner"
runnerImageTag = "e2e"
runnerImage = Img(runnerImageRepo, runnerImageTag)
runnerImage = testing.Img(runnerImageRepo, runnerImageTag)
prebuildImages = []testing.ContainerImage{
controllerImage,
@@ -49,12 +40,22 @@ var (
certManagerVersion = "v1.1.1"
images = []testing.ContainerImage{
Img("docker", "dind"),
Img("quay.io/brancz/kube-rbac-proxy", "v0.10.0"),
Img("quay.io/jetstack/cert-manager-controller", certManagerVersion),
Img("quay.io/jetstack/cert-manager-cainjector", certManagerVersion),
Img("quay.io/jetstack/cert-manager-webhook", certManagerVersion),
testing.Img("docker", "dind"),
testing.Img("quay.io/brancz/kube-rbac-proxy", "v0.10.0"),
testing.Img("quay.io/jetstack/cert-manager-controller", certManagerVersion),
testing.Img("quay.io/jetstack/cert-manager-cainjector", certManagerVersion),
testing.Img("quay.io/jetstack/cert-manager-webhook", certManagerVersion),
}
commonScriptEnv = []string{
"SYNC_PERIOD=" + "10s",
"NAME=" + controllerImageRepo,
"VERSION=" + controllerImageTag,
"RUNNER_NAME=" + runnerImageRepo,
"RUNNER_TAG=" + runnerImageTag,
}
testResultCMNamePrefix = "test-result-"
)
// If you're willing to run this test via VS Code "run test" or "debug test",
@@ -71,141 +72,208 @@ var (
// This function requires a few environment variables to be set to provide some test data.
// If you're using VS Code and wanting to run this test locally,
// Browse "Workspace Settings" and search for "go test env file" and put e.g. "${workspaceFolder}/.test.env" there.
//
// Instead of relying on "stages" to make it possible to rerun individual tests like terratest,
// you use the "run subtest" feature provided by IDE like VS Code, IDEA, and GoLand.
// Our `testing` package automatically checks for the running test name and skips the cleanup tasks
// whenever the whole test failed, so that you can immediately start fixing issues and rerun individual tests.
// See the below link for how terratest handles this:
// https://terratest.gruntwork.io/docs/testing-best-practices/iterating-locally-using-test-stages/
func TestE2E(t *testing.T) {
if testing.Short() {
t.Skip("Skipped as -short is set")
}
k := testing.Start(t, testing.Cluster{}, testing.Preload(images...))
env := initTestEnv(t)
env.useRunnerSet = true
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
t.Run("build images", func(t *testing.T) {
if err := k.BuildImages(ctx, builds); err != nil {
t.Fatal(err)
}
t.Run("build and load images", func(t *testing.T) {
env.buildAndLoadImages(t)
})
t.Run("load images", func(t *testing.T) {
if err := k.LoadImages(ctx, prebuildImages); err != nil {
t.Fatal(err)
}
})
kubectlEnv := []string{
"KUBECONFIG=" + k.Kubeconfig(),
}
t.Run("install cert-manager", func(t *testing.T) {
applyCfg := testing.KubectlConfig{NoValidate: true, Env: kubectlEnv}
if err := k.Apply(ctx, fmt.Sprintf("https://github.com/jetstack/cert-manager/releases/download/%s/cert-manager.yaml", certManagerVersion), applyCfg); err != nil {
t.Fatal(err)
}
waitCfg := testing.KubectlConfig{
Env: kubectlEnv,
Namespace: "cert-manager",
Timeout: 90 * time.Second,
}
if err := k.WaitUntilDeployAvailable(ctx, "cert-manager-cainjector", waitCfg); err != nil {
t.Fatal(err)
}
if err := k.WaitUntilDeployAvailable(ctx, "cert-manager-webhook", waitCfg.WithTimeout(60*time.Second)); err != nil {
t.Fatal(err)
}
if err := k.WaitUntilDeployAvailable(ctx, "cert-manager", waitCfg.WithTimeout(60*time.Second)); err != nil {
t.Fatal(err)
}
if err := k.RunKubectlEnsureNS(ctx, "actions-runner-system", testing.KubectlConfig{Env: kubectlEnv}); err != nil {
t.Fatal(err)
}
env.installCertManager(t)
})
t.Run("make default serviceaccount cluster-admin", func(t *testing.T) {
cfg := testing.KubectlConfig{Env: kubectlEnv}
bindingName := "default-admin"
if _, err := k.GetClusterRoleBinding(ctx, bindingName, cfg); err != nil {
if err := k.CreateClusterRoleBindingServiceAccount(ctx, bindingName, "cluster-admin", "default:default", cfg); err != nil {
t.Fatal(err)
}
}
})
cmCfg := testing.KubectlConfig{
Env: kubectlEnv,
}
testInfoName := "test-info"
m, _ := k.GetCMLiterals(ctx, testInfoName, cmCfg)
t.Run("Save test ID", func(t *testing.T) {
if m == nil {
id := RandStringBytesRmndr(10)
m = map[string]string{"id": id}
if err := k.CreateCMLiterals(ctx, testInfoName, m, cmCfg); err != nil {
t.Fatal(err)
}
}
})
id := m["id"]
runnerLabel := "test-" + id
testID := t.Name() + " " + id
t.Logf("Using test id %s", testID)
githubToken := getenv(t, "GITHUB_TOKEN")
testRepo := getenv(t, "TEST_REPO")
testOrg := getenv(t, "TEST_ORG")
testOrgRepo := getenv(t, "TEST_ORG_REPO")
if t.Failed() {
return
}
t.Run("install actions-runner-controller and runners", func(t *testing.T) {
scriptEnv := []string{
"KUBECONFIG=" + k.Kubeconfig(),
"ACCEPTANCE_TEST_DEPLOYMENT_TOOL=" + "helm",
"ACCEPTANCE_TEST_SECRET_TYPE=token",
"NAME=" + controllerImageRepo,
"VERSION=" + controllerImageTag,
"RUNNER_NAME=" + runnerImageRepo,
"RUNNER_TAG=" + runnerImageTag,
"TEST_REPO=" + testRepo,
"TEST_ORG=" + testOrg,
"TEST_ORG_REPO=" + testOrgRepo,
"SYNC_PERIOD=" + "10s",
"USE_RUNNERSET=" + "1",
"GITHUB_TOKEN=" + githubToken,
"RUNNER_LABEL=" + runnerLabel,
}
if err := k.RunScript(ctx, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv}); err != nil {
t.Fatal(err)
}
env.installActionsRunnerController(t)
})
testResultCMNamePrefix := "test-result-"
if t.Failed() {
return
}
numJobs := 2
t.Run("Install workflow", func(t *testing.T) {
env.installActionsWorkflow(t)
})
type job struct {
name, testArg, configMapName string
if t.Failed() {
return
}
t.Run("Verify workflow run result", func(t *testing.T) {
env.verifyActionsWorkflowRun(t)
})
}
func TestE2ERunnerDeploy(t *testing.T) {
if testing.Short() {
t.Skip("Skipped as -short is set")
}
env := initTestEnv(t)
t.Run("build and load images", func(t *testing.T) {
env.buildAndLoadImages(t)
})
t.Run("install cert-manager", func(t *testing.T) {
env.installCertManager(t)
})
if t.Failed() {
return
}
t.Run("install actions-runner-controller and runners", func(t *testing.T) {
env.installActionsRunnerController(t)
})
if t.Failed() {
return
}
t.Run("Install workflow", func(t *testing.T) {
env.installActionsWorkflow(t)
})
if t.Failed() {
return
}
t.Run("Verify workflow run result", func(t *testing.T) {
env.verifyActionsWorkflowRun(t)
})
}
type env struct {
*testing.Env
useRunnerSet bool
testID string
runnerLabel, githubToken, testRepo, testOrg, testOrgRepo string
testJobs []job
}
func initTestEnv(t *testing.T) *env {
t.Helper()
testingEnv := testing.Start(t, testing.Preload(images...))
e := &env{Env: testingEnv}
id := e.ID()
testID := t.Name() + " " + id
t.Logf("Using test id %s", testID)
e.testID = testID
e.runnerLabel = "test-" + id
e.githubToken = testing.Getenv(t, "GITHUB_TOKEN")
e.testRepo = testing.Getenv(t, "TEST_REPO")
e.testOrg = testing.Getenv(t, "TEST_ORG")
e.testOrgRepo = testing.Getenv(t, "TEST_ORG_REPO")
e.testJobs = createTestJobs(id, testResultCMNamePrefix, 2)
return e
}
func (e *env) f() {
}
func (e *env) buildAndLoadImages(t *testing.T) {
t.Helper()
e.DockerBuild(t, builds)
e.KindLoadImages(t, prebuildImages)
}
func (e *env) installCertManager(t *testing.T) {
t.Helper()
applyCfg := testing.KubectlConfig{NoValidate: true}
e.KubectlApply(t, fmt.Sprintf("https://github.com/jetstack/cert-manager/releases/download/%s/cert-manager.yaml", certManagerVersion), applyCfg)
waitCfg := testing.KubectlConfig{
Namespace: "cert-manager",
Timeout: 90 * time.Second,
}
e.KubectlWaitUntilDeployAvailable(t, "cert-manager-cainjector", waitCfg)
e.KubectlWaitUntilDeployAvailable(t, "cert-manager-webhook", waitCfg.WithTimeout(60*time.Second))
e.KubectlWaitUntilDeployAvailable(t, "cert-manager", waitCfg.WithTimeout(60*time.Second))
}
func (e *env) installActionsRunnerController(t *testing.T) {
t.Helper()
e.createControllerNamespaceAndServiceAccount(t)
scriptEnv := []string{
"KUBECONFIG=" + e.Kubeconfig(),
"ACCEPTANCE_TEST_DEPLOYMENT_TOOL=" + "helm",
"ACCEPTANCE_TEST_SECRET_TYPE=token",
}
if e.useRunnerSet {
scriptEnv = append(scriptEnv, "USE_RUNNERSET=1")
}
varEnv := []string{
"TEST_REPO=" + e.testRepo,
"TEST_ORG=" + e.testOrg,
"TEST_ORG_REPO=" + e.testOrgRepo,
"GITHUB_TOKEN=" + e.githubToken,
"RUNNER_LABEL=" + e.runnerLabel,
}
scriptEnv = append(scriptEnv, varEnv...)
scriptEnv = append(scriptEnv, commonScriptEnv...)
e.RunScript(t, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
}
func (e *env) createControllerNamespaceAndServiceAccount(t *testing.T) {
t.Helper()
e.KubectlEnsureNS(t, "actions-runner-system", testing.KubectlConfig{})
e.KubectlEnsureClusterRoleBindingServiceAccount(t, "default-admin", "cluster-admin", "default:default", testing.KubectlConfig{})
}
func (e *env) installActionsWorkflow(t *testing.T) {
t.Helper()
installActionsWorkflow(t, e.testID, e.runnerLabel, testResultCMNamePrefix, e.testRepo, e.testJobs)
}
func (e *env) verifyActionsWorkflowRun(t *testing.T) {
t.Helper()
verifyActionsWorkflowRun(t, e.Env, e.testJobs)
}
type job struct {
name, testArg, configMapName string
}
func createTestJobs(id, testResultCMNamePrefix string, numJobs int) []job {
var testJobs []job
for i := 0; i < numJobs; i++ {
@@ -216,45 +284,52 @@ func TestE2E(t *testing.T) {
testJobs = append(testJobs, job{name: name, testArg: testArg, configMapName: configMapName})
}
t.Run("Install workflow", func(t *testing.T) {
wfName := "E2E " + testID
wf := testing.Workflow{
Name: wfName,
On: testing.On{
Push: &testing.Push{
Branches: []string{"main"},
return testJobs
}
func installActionsWorkflow(t *testing.T, testID, runnerLabel, testResultCMNamePrefix, testRepo string, testJobs []job) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
wfName := "E2E " + testID
wf := testing.Workflow{
Name: wfName,
On: testing.On{
Push: &testing.Push{
Branches: []string{"main"},
},
},
Jobs: map[string]testing.Job{},
}
for _, j := range testJobs {
wf.Jobs[j.name] = testing.Job{
RunsOn: runnerLabel,
Steps: []testing.Step{
{
Uses: testing.ActionsCheckoutV2,
},
{
Uses: "azure/setup-kubectl@v1",
With: &testing.With{
Version: "v1.20.2",
},
},
{
Run: fmt.Sprintf("./test.sh %s %s", t.Name(), j.testArg),
},
},
Jobs: map[string]testing.Job{},
}
}
for i := 0; i < numJobs; i++ {
j := testJobs[i]
wf.Jobs[j.name] = testing.Job{
RunsOn: runnerLabel,
Steps: []testing.Step{
{
Uses: testing.ActionsCheckoutV2,
},
{
Uses: "azure/setup-kubectl@v1",
With: &testing.With{
Version: "v1.20.2",
},
},
{
Run: fmt.Sprintf("./test.sh %s %s", t.Name(), j.testArg),
},
},
}
}
wfContent, err := yaml.Marshal(wf)
if err != nil {
t.Fatal(err)
}
wfContent, err := yaml.Marshal(wf)
if err != nil {
t.Fatal(err)
}
script := []byte(fmt.Sprintf(`#!/usr/bin/env bash
script := []byte(fmt.Sprintf(`#!/usr/bin/env bash
set -vx
name=$1
id=$2
@@ -263,87 +338,70 @@ kubectl delete cm %s$id || true
kubectl create cm %s$id --from-literal=status=ok
`, testResultCMNamePrefix, testResultCMNamePrefix))
g := testing.GitRepo{
Dir: filepath.Join(t.TempDir(), "gitrepo"),
Name: testRepo,
CommitMessage: wfName,
Contents: map[string][]byte{
".github/workflows/workflow.yaml": wfContent,
"test.sh": script,
},
}
if err := g.Sync(ctx); err != nil {
t.Fatal(err)
}
})
if t.Failed() {
return
g := testing.GitRepo{
Dir: filepath.Join(t.TempDir(), "gitrepo"),
Name: testRepo,
CommitMessage: wfName,
Contents: map[string][]byte{
".github/workflows/workflow.yaml": wfContent,
"test.sh": script,
},
}
t.Run("Verify workflow run result", func(t *testing.T) {
var expected []string
for i := 0; i < numJobs; i++ {
expected = append(expected, "ok")
}
gomega.NewGomegaWithT(t).Eventually(func() ([]string, error) {
var results []string
var errs []error
for i := 0; i < numJobs; i++ {
testResultCMName := testJobs[i].configMapName
m, err := k.GetCMLiterals(ctx, testResultCMName, cmCfg)
if err != nil {
errs = append(errs, err)
} else {
result := m["status"]
results = append(results, result)
}
}
var err error
if len(errs) > 0 {
var msg string
for i, e := range errs {
msg += fmt.Sprintf("error%d: %v\n", i, e)
}
err = fmt.Errorf("%d errors occurred: %s", len(errs), msg)
}
return results, err
}, 60*time.Second, 10*time.Second).Should(gomega.Equal(expected))
})
if err := g.Sync(ctx); err != nil {
t.Fatal(err)
}
}
func getenv(t *testing.T, name string) string {
func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job) {
t.Helper()
v := os.Getenv(name)
if v == "" {
t.Fatal(name + " must be set")
var expected []string
for _ = range testJobs {
expected = append(expected, "ok")
}
return v
}
func init() {
rand.Seed(time.Now().UnixNano())
}
gomega.NewGomegaWithT(t).Eventually(func() ([]string, error) {
var results []string
const letterBytes = "abcdefghijklmnopqrstuvwxyz"
var errs []error
// Copied from https://stackoverflow.com/a/31832326 with thanks
func RandStringBytesRmndr(n int) string {
b := make([]byte, n)
for i := range b {
b[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]
}
return string(b)
for i := range testJobs {
testResultCMName := testJobs[i].configMapName
kubectlEnv := []string{
"KUBECONFIG=" + env.Kubeconfig(),
}
cmCfg := testing.KubectlConfig{
Env: kubectlEnv,
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
m, err := env.Kubectl.GetCMLiterals(ctx, testResultCMName, cmCfg)
if err != nil {
errs = append(errs, err)
} else {
result := m["status"]
results = append(results, result)
}
}
var err error
if len(errs) > 0 {
var msg string
for i, e := range errs {
msg += fmt.Sprintf("error%d: %v\n", i, e)
}
err = fmt.Errorf("%d errors occurred: %s", len(errs), msg)
}
return results, err
}, 60*time.Second, 10*time.Second).Should(gomega.Equal(expected))
}

42
testing/bash.go Normal file
View File

@@ -0,0 +1,42 @@
package testing
import (
"context"
"os"
"os/exec"
"path/filepath"
"github.com/actions-runner-controller/actions-runner-controller/testing/runtime"
)
// ScriptConfig holds the settings for a single script invocation:
// extra environment variables and the working directory.
type ScriptConfig struct {
// Env entries are appended to the inherited process environment.
Env []string
// Dir is the working directory the script runs in ("" = current dir).
Dir string
}
// Bash runs shell scripts through the `bash` binary, reusing
// runtime.Cmdr for command execution and failure reporting.
type Bash struct {
runtime.Cmdr
}
// RunScript resolves path to an absolute path and executes it with
// bash, applying cfg's environment and working directory. The combined
// output is discarded on success; on failure Cmdr.CombinedOutput has
// already written it to stderr and the error is returned.
func (k *Bash) RunScript(ctx context.Context, path string, cfg ScriptConfig) error {
abs, err := filepath.Abs(path)
if err != nil {
return err
}
if _, err := k.CombinedOutput(k.bashRunScriptCmd(ctx, abs, cfg)); err != nil {
return err
}
return nil
}
// bashRunScriptCmd builds (but does not start) the `bash <path>`
// command, inheriting the current process environment plus cfg.Env.
func (k *Bash) bashRunScriptCmd(ctx context.Context, path string, cfg ScriptConfig) *exec.Cmd {
cmd := exec.CommandContext(ctx, "bash", path)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, cfg.Env...)
cmd.Dir = cfg.Dir
return cmd
}

49
testing/docker.go Normal file
View File

@@ -0,0 +1,49 @@
package testing
import (
"context"
"fmt"
"os/exec"
"path/filepath"
"github.com/actions-runner-controller/actions-runner-controller/testing/runtime"
)
// Docker drives the `docker` CLI, reusing runtime.Cmdr for command
// execution and failure reporting.
type Docker struct {
runtime.Cmdr
}
// DockerBuild describes one image build: the Dockerfile to use, extra
// build args, and the image (repo + tag) to produce.
type DockerBuild struct {
Dockerfile string
Args []BuildArg
Image ContainerImage
}
// BuildArg is a single --build-arg name/value pair.
type BuildArg struct {
Name, Value string
}
// Build runs `docker build` once per entry in builds, stopping at the
// first failure. TARGETPLATFORM is forced to linux/amd64 for every
// build before the caller-supplied args are added.
func (k *Docker) Build(ctx context.Context, builds []DockerBuild) error {
for _, build := range builds {
var args []string
args = append(args, "--build-arg=TARGETPLATFORM="+"linux/amd64")
for _, buildArg := range build.Args {
args = append(args, "--build-arg="+buildArg.Name+"="+buildArg.Value)
}
_, err := k.CombinedOutput(k.dockerBuildCmd(ctx, build.Dockerfile, build.Image.Repo, build.Image.Tag, args))
if err != nil {
return fmt.Errorf("failed building %v: %w", build, err)
}
}
return nil
}
// dockerBuildCmd builds (but does not start) the `docker build`
// command, using the Dockerfile's directory as the build context.
func (k *Docker) dockerBuildCmd(ctx context.Context, dockerfile, repo, tag string, args []string) *exec.Cmd {
buildContext := filepath.Dir(dockerfile)
args = append([]string{"build", "--tag", repo + ":" + tag, "-f", dockerfile, buildContext}, args...)
cmd := exec.CommandContext(ctx, "docker", args...)
return cmd
}

16
testing/getenv.go Normal file
View File

@@ -0,0 +1,16 @@
package testing
import (
"os"
"testing"
)
// Getenv returns the value of the named environment variable, failing
// the test immediately when it is unset or empty.
func Getenv(t *testing.T, name string) string {
	t.Helper()
	value, ok := os.LookupEnv(name)
	if !ok || value == "" {
		t.Fatal(name + " must be set")
	}
	return value
}

View File

@@ -7,7 +7,8 @@ import (
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/actions-runner-controller/actions-runner-controller/testing/runtime"
)
type GitRepo struct {
@@ -15,6 +16,8 @@ type GitRepo struct {
Name string
CommitMessage string
Contents map[string][]byte
runtime.Cmdr
}
func (g *GitRepo) Sync(ctx context.Context) error {
@@ -34,7 +37,7 @@ func (g *GitRepo) Sync(ctx context.Context) error {
return fmt.Errorf("error getting abs path for %q: %w", g.Dir, err)
}
if _, err := g.combinedOutput(g.gitCloneCmd(ctx, repoURL, dir)); err != nil {
if _, err := g.CombinedOutput(g.gitCloneCmd(ctx, repoURL, dir)); err != nil {
return err
}
@@ -45,17 +48,17 @@ func (g *GitRepo) Sync(ctx context.Context) error {
return fmt.Errorf("error writing %s: %w", path, err)
}
if _, err := g.combinedOutput(g.gitAddCmd(ctx, dir, path)); err != nil {
if _, err := g.CombinedOutput(g.gitAddCmd(ctx, dir, path)); err != nil {
return err
}
}
if _, err := g.combinedOutput(g.gitDiffCmd(ctx, dir)); err != nil {
if _, err := g.combinedOutput(g.gitCommitCmd(ctx, dir, g.CommitMessage)); err != nil {
if _, err := g.CombinedOutput(g.gitDiffCmd(ctx, dir)); err != nil {
if _, err := g.CombinedOutput(g.gitCommitCmd(ctx, dir, g.CommitMessage)); err != nil {
return err
}
if _, err := g.combinedOutput(g.gitPushCmd(ctx, dir)); err != nil {
if _, err := g.CombinedOutput(g.gitPushCmd(ctx, dir)); err != nil {
return err
}
}
@@ -90,23 +93,3 @@ func (g *GitRepo) gitPushCmd(ctx context.Context, dir string) *exec.Cmd {
cmd.Dir = dir
return cmd
}
func (g *GitRepo) combinedOutput(cmd *exec.Cmd) (string, error) {
o, err := cmd.CombinedOutput()
if err != nil {
args := append([]string{}, cmd.Args...)
args[0] = cmd.Path
cs := strings.Join(args, " ")
s := string(o)
g.errorf("%s failed with output:\n%s", cs, s)
return s, err
}
return string(o), nil
}
func (g *GitRepo) errorf(f string, args ...interface{}) {
fmt.Fprintf(os.Stderr, f+"\n", args...)
}

125
testing/kubectl.go Normal file
View File

@@ -0,0 +1,125 @@
package testing
import (
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"time"
"github.com/actions-runner-controller/actions-runner-controller/testing/runtime"
)
// Kubectl invokes the kubectl CLI for cluster operations that would
// otherwise be automated via shell scripts or makefiles, reusing
// runtime.Cmdr for command execution and failure reporting.
type Kubectl struct {
	runtime.Cmdr
}

// KubectlConfig carries per-invocation options applied to every
// kubectl command assembled by kubectlCmd.
type KubectlConfig struct {
	// Env entries are appended to the inherited process environment
	// (e.g. "KUBECONFIG=...").
	Env []string
	// NoValidate adds --validate=false, useful when applying manifests
	// whose CRDs are not registered yet.
	NoValidate bool
	// Timeout adds --timeout when positive.
	Timeout time.Duration
	// Namespace adds -n=<namespace> when non-empty.
	Namespace string
}

// WithTimeout returns a copy of the config with Timeout replaced by o.
func (k KubectlConfig) WithTimeout(o time.Duration) KubectlConfig {
	k.Timeout = o
	return k
}

// EnsureNS creates the namespace if it does not already exist.
// It is idempotent: an existing namespace is left untouched.
func (k *Kubectl) EnsureNS(ctx context.Context, name string, cfg KubectlConfig) error {
	if _, err := k.CombinedOutput(k.kubectlCmd(ctx, "get", []string{"ns", name}, cfg)); err != nil {
		if _, err := k.CombinedOutput(k.kubectlCmd(ctx, "create", []string{"ns", name}, cfg)); err != nil {
			return err
		}
	}
	return nil
}

// GetClusterRoleBinding returns kubectl's output for the named
// clusterrolebinding, or an error when it does not exist.
func (k *Kubectl) GetClusterRoleBinding(ctx context.Context, name string, cfg KubectlConfig) (string, error) {
	o, err := k.CombinedOutput(k.kubectlCmd(ctx, "get", []string{"clusterrolebinding", name}, cfg))
	if err != nil {
		return "", err
	}
	return o, nil
}

// CreateClusterRoleBindingServiceAccount binds the given cluster role
// to a service account, where sa is formatted as "namespace:name".
func (k *Kubectl) CreateClusterRoleBindingServiceAccount(ctx context.Context, name string, clusterrole string, sa string, cfg KubectlConfig) error {
	_, err := k.CombinedOutput(k.kubectlCmd(ctx, "create", []string{"clusterrolebinding", name, "--clusterrole=" + clusterrole, "--serviceaccount=" + sa}, cfg))
	if err != nil {
		return err
	}
	return nil
}

// GetCMLiterals fetches the named configmap as JSON and returns its
// .data map.
func (k *Kubectl) GetCMLiterals(ctx context.Context, name string, cfg KubectlConfig) (map[string]string, error) {
	o, err := k.CombinedOutput(k.kubectlCmd(ctx, "get", []string{"cm", name, "-o=json"}, cfg))
	if err != nil {
		return nil, err
	}
	var cm struct {
		Data map[string]string `json:"data"`
	}
	if err := json.Unmarshal([]byte(o), &cm); err != nil {
		k.Errorf("Failed unmarshalling this data to JSON:\n%s\n", o)
		return nil, fmt.Errorf("unmarshalling json: %w", err)
	}
	return cm.Data, nil
}

// CreateCMLiterals creates a configmap whose data entries are the
// given key/value literals.
func (k *Kubectl) CreateCMLiterals(ctx context.Context, name string, literals map[string]string, cfg KubectlConfig) error {
	args := []string{"cm", name}
	// Loop variables named to avoid shadowing the receiver k.
	for key, value := range literals {
		args = append(args, fmt.Sprintf("--from-literal=%s=%s", key, value))
	}
	if _, err := k.CombinedOutput(k.kubectlCmd(ctx, "create", args, cfg)); err != nil {
		return err
	}
	return nil
}

// Apply runs `kubectl apply -f path`; path may be a local file or URL.
func (k *Kubectl) Apply(ctx context.Context, path string, cfg KubectlConfig) error {
	if _, err := k.CombinedOutput(k.kubectlCmd(ctx, "apply", []string{"-f", path}, cfg)); err != nil {
		return err
	}
	return nil
}

// WaitUntilDeployAvailable blocks until the named deployment reports
// the Available condition (bounded by cfg.Timeout when set).
func (k *Kubectl) WaitUntilDeployAvailable(ctx context.Context, name string, cfg KubectlConfig) error {
	if _, err := k.CombinedOutput(k.kubectlCmd(ctx, "wait", []string{"deploy/" + name, "--for=condition=available"}, cfg)); err != nil {
		return err
	}
	return nil
}

// kubectlCmd assembles (but does not start) the command
// `kubectl <c> <args...>` with the flags and environment dictated by cfg.
func (k *Kubectl) kubectlCmd(ctx context.Context, c string, args []string, cfg KubectlConfig) *exec.Cmd {
	args = append([]string{c}, args...)
	if cfg.NoValidate {
		args = append(args, "--validate=false")
	}
	if cfg.Namespace != "" {
		args = append(args, "-n="+cfg.Namespace)
	}
	if cfg.Timeout > 0 {
		// Duration.String() yields kubectl-compatible values such as
		// "1m30s"; avoids fmt.Sprintf("%s", ...) (staticcheck S1025).
		args = append(args, "--timeout="+cfg.Timeout.String())
	}
	cmd := exec.CommandContext(ctx, "kubectl", args...)
	cmd.Env = os.Environ()
	cmd.Env = append(cmd.Env, cfg.Env...)
	return cmd
}

21
testing/random.go Normal file
View File

@@ -0,0 +1,21 @@
package testing
import (
"math/rand"
"time"
)
// init seeds the shared math/rand source so generated identifiers
// differ between test runs.
func init() {
	rand.Seed(time.Now().UnixNano())
}

// letterBytes is the alphabet random identifiers are drawn from.
const letterBytes = "abcdefghijklmnopqrstuvwxyz"

// RandStringBytesRmndr returns a random string of length n whose
// characters are drawn from letterBytes.
// Copied from https://stackoverflow.com/a/31832326 with thanks
func RandStringBytesRmndr(n int) string {
	out := make([]byte, n)
	for i := 0; i < len(out); i++ {
		out[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]
	}
	return string(out)
}

View File

@@ -0,0 +1,31 @@
package runtime
import (
"fmt"
"os"
"os/exec"
"strings"
)
// Cmdr centralizes running external commands: it captures combined
// stdout/stderr and writes a diagnostic to stderr on failure. It is
// meant to be embedded by CLI wrappers (kubectl, docker, bash, ...).
type Cmdr struct {
}
// CombinedOutput runs cmd and returns its combined stdout/stderr as a
// string. On failure the full command line and its output are reported
// via Errorf, and the (partial) output is returned alongside the error.
func (k Cmdr) CombinedOutput(cmd *exec.Cmd) (string, error) {
o, err := cmd.CombinedOutput()
if err != nil {
// Reconstruct the command line for the diagnostic; args[0] is
// replaced with the resolved binary path.
args := append([]string{}, cmd.Args...)
args[0] = cmd.Path
cs := strings.Join(args, " ")
s := string(o)
k.Errorf("%s failed with output:\n%s", cs, s)
return s, err
}
return string(o), nil
}
// Errorf writes a printf-style message plus a trailing newline to stderr.
func (k Cmdr) Errorf(f string, args ...interface{}) {
fmt.Fprintf(os.Stderr, f+"\n", args...)
}

View File

@@ -2,7 +2,6 @@ package testing
import (
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
@@ -10,16 +9,199 @@ import (
"strings"
"testing"
"time"
"github.com/actions-runner-controller/actions-runner-controller/testing/runtime"
)
type T = testing.T
var Short = testing.Short
// Cluster is a test cluster backend by a kind cluster and the dockerd powering it.
func Img(repo, tag string) ContainerImage {
return ContainerImage{
Repo: repo,
Tag: tag,
}
}
// Env is a testing environment.
// All of its methods are idempotent so that you can safely call it from within each subtest
// and you can rerun the individual subtest until it works as you expect.
type Env struct {
kind *Kind
docker *Docker
Kubectl *Kubectl
bash *Bash
id string
}
func Start(t *testing.T, opts ...Option) *Env {
t.Helper()
k := StartKind(t, opts...)
var env Env
env.kind = k
d := &Docker{}
env.docker = d
kctl := &Kubectl{}
env.Kubectl = kctl
bash := &Bash{}
env.bash = bash
//
cmKey := "id"
kubectlEnv := []string{
"KUBECONFIG=" + k.Kubeconfig(),
}
cmCfg := KubectlConfig{
Env: kubectlEnv,
}
testInfoName := "test-info"
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
m, _ := kctl.GetCMLiterals(ctx, testInfoName, cmCfg)
if m == nil {
id := RandStringBytesRmndr(10)
m = map[string]string{cmKey: id}
if err := kctl.CreateCMLiterals(ctx, testInfoName, m, cmCfg); err != nil {
t.Fatal(err)
}
}
env.id = m[cmKey]
return &env
}
func (e *Env) ID() string {
return e.id
}
func (e *Env) DockerBuild(t *testing.T, builds []DockerBuild) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
if err := e.docker.Build(ctx, builds); err != nil {
t.Fatal(err)
}
}
func (e *Env) KindLoadImages(t *testing.T, prebuildImages []ContainerImage) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
if err := e.kind.LoadImages(ctx, prebuildImages); err != nil {
t.Fatal(err)
}
}
func (e *Env) KubectlApply(t *testing.T, path string, cfg KubectlConfig) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
kubectlEnv := []string{
"KUBECONFIG=" + e.kind.Kubeconfig(),
}
cfg.Env = append(kubectlEnv, cfg.Env...)
if err := e.Kubectl.Apply(ctx, path, cfg); err != nil {
t.Fatal(err)
}
}
func (e *Env) KubectlWaitUntilDeployAvailable(t *testing.T, name string, cfg KubectlConfig) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
kubectlEnv := []string{
"KUBECONFIG=" + e.kind.Kubeconfig(),
}
cfg.Env = append(kubectlEnv, cfg.Env...)
if err := e.Kubectl.WaitUntilDeployAvailable(ctx, name, cfg); err != nil {
t.Fatal(err)
}
}
func (e *Env) KubectlEnsureNS(t *testing.T, name string, cfg KubectlConfig) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
kubectlEnv := []string{
"KUBECONFIG=" + e.kind.Kubeconfig(),
}
cfg.Env = append(kubectlEnv, cfg.Env...)
if err := e.Kubectl.EnsureNS(ctx, name, cfg); err != nil {
t.Fatal(err)
}
}
func (e *Env) KubectlEnsureClusterRoleBindingServiceAccount(t *testing.T, bindingName string, clusterrole string, serviceaccount string, cfg KubectlConfig) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
kubectlEnv := []string{
"KUBECONFIG=" + e.kind.Kubeconfig(),
}
cfg.Env = append(kubectlEnv, cfg.Env...)
if _, err := e.Kubectl.GetClusterRoleBinding(ctx, bindingName, cfg); err != nil {
if err := e.Kubectl.CreateClusterRoleBindingServiceAccount(ctx, bindingName, clusterrole, serviceaccount, cfg); err != nil {
t.Fatal(err)
}
}
}
// Kubeconfig returns the path to the kubeconfig file of the kind
// cluster backing this test environment.
func (e *Env) Kubeconfig() string {
	return e.kind.Kubeconfig()
}
// RunScript executes the bash script at path with the given config,
// failing the test on error. Bounded by a 5-minute timeout.
func (e *Env) RunScript(t *testing.T, path string, cfg ScriptConfig) {
	t.Helper()

	runCtx, cancelRun := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancelRun()

	err := e.bash.RunScript(runCtx, path, cfg)
	if err != nil {
		t.Fatal(err)
	}
}
// Kind is a test cluster backend by a kind cluster and the dockerd powering it.
// It intracts with the kind cluster via the kind command and dockerd via the docker command
// for various operations that otherwise needs to be automated via shell scripts or makefiles.
type Cluster struct {
type Kind struct {
// Name is the name of the cluster
Name string
@@ -29,6 +211,8 @@ type Cluster struct {
Dir string
kubeconfig string
runtime.Cmdr
}
type Config struct {
@@ -50,7 +234,7 @@ type ContainerImage struct {
Repo, Tag string
}
func Start(t *testing.T, k Cluster, opts ...Option) *Cluster {
func StartKind(t *testing.T, opts ...Option) *Kind {
t.Helper()
invalidChars := []string{"/"}
@@ -60,7 +244,7 @@ func Start(t *testing.T, k Cluster, opts ...Option) *Cluster {
for _, c := range invalidChars {
name = strings.ReplaceAll(name, c, "")
}
var k Kind
k.Name = name
k.Dir = t.TempDir()
@@ -118,12 +302,12 @@ func Start(t *testing.T, k Cluster, opts ...Option) *Cluster {
return kk
}
func (k *Cluster) Kubeconfig() string {
func (k *Kind) Kubeconfig() string {
return k.kubeconfig
}
func (k *Cluster) Start(ctx context.Context) error {
getNodes, err := k.combinedOutput(k.kindGetNodesCmd(ctx, k.Name))
func (k *Kind) Start(ctx context.Context) error {
getNodes, err := k.CombinedOutput(k.kindGetNodesCmd(ctx, k.Name))
if err != nil {
return err
}
@@ -145,7 +329,7 @@ name: %s
return err
}
if _, err := k.combinedOutput(k.kindCreateCmd(ctx, k.Name, f.Name())); err != nil {
if _, err := k.CombinedOutput(k.kindCreateCmd(ctx, k.Name, f.Name())); err != nil {
return err
}
}
@@ -153,70 +337,15 @@ name: %s
return nil
}
// combinedOutput runs cmd and returns its combined stdout+stderr as a
// string. On failure the full command line and its output are reported
// via errorf, and the captured output is returned alongside the error.
func (k *Cluster) combinedOutput(cmd *exec.Cmd) (string, error) {
	raw, err := cmd.CombinedOutput()
	output := string(raw)
	if err == nil {
		return output, nil
	}

	// Rebuild the command line for the error report, substituting the
	// resolved binary path for argv[0].
	argv := append([]string{}, cmd.Args...)
	argv[0] = cmd.Path
	k.errorf("%s failed with output:\n%s", strings.Join(argv, " "), output)
	return output, err
}
// errorf writes a formatted message plus a trailing newline to stderr;
// used to surface failed-command output during tests.
func (k *Cluster) errorf(f string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, f+"\n", args...)
}
func (k *Cluster) kindGetNodesCmd(ctx context.Context, cluster string) *exec.Cmd {
func (k *Kind) kindGetNodesCmd(ctx context.Context, cluster string) *exec.Cmd {
return exec.CommandContext(ctx, "kind", "get", "nodes", "--name", cluster)
}
func (k *Cluster) kindCreateCmd(ctx context.Context, cluster, configFile string) *exec.Cmd {
func (k *Kind) kindCreateCmd(ctx context.Context, cluster, configFile string) *exec.Cmd {
return exec.CommandContext(ctx, "kind", "create", "cluster", "--name", cluster, "--config", configFile)
}
// DockerBuild describes a single `docker build` invocation: the
// Dockerfile to build, extra build args, and the image (repo:tag) to
// produce.
type DockerBuild struct {
	Dockerfile string
	Args       []BuildArg
	Image      ContainerImage
}

// BuildArg is one --build-arg NAME=VALUE pair for docker build.
type BuildArg struct {
	Name, Value string
}
// BuildImages runs `docker build` for each requested build, always
// passing TARGETPLATFORM=linux/amd64 in addition to any per-build args.
// It stops at the first failure, wrapping the error with the build
// description.
func (k *Cluster) BuildImages(ctx context.Context, builds []DockerBuild) error {
	for _, b := range builds {
		buildArgs := make([]string, 0, len(b.Args)+1)
		buildArgs = append(buildArgs, "--build-arg=TARGETPLATFORM="+"linux/amd64")
		for _, a := range b.Args {
			buildArgs = append(buildArgs, "--build-arg="+a.Name+"="+a.Value)
		}

		cmd := k.dockerBuildCmd(ctx, b.Dockerfile, b.Image.Repo, b.Image.Tag, buildArgs)
		if _, err := k.combinedOutput(cmd); err != nil {
			return fmt.Errorf("failed building %v: %w", b, err)
		}
	}

	return nil
}
// dockerBuildCmd builds the `docker build` command line for the given
// Dockerfile, image repo:tag, and extra arguments. The build context is
// the directory containing the Dockerfile.
func (k *Cluster) dockerBuildCmd(ctx context.Context, dockerfile, repo, tag string, args []string) *exec.Cmd {
	base := []string{"build", "--tag", repo + ":" + tag, "-f", dockerfile, filepath.Dir(dockerfile)}
	return exec.CommandContext(ctx, "docker", append(base, args...)...)
}
func (k *Cluster) LoadImages(ctx context.Context, images []ContainerImage) error {
func (k *Kind) LoadImages(ctx context.Context, images []ContainerImage) error {
for _, img := range images {
const maxRetries = 5
@@ -236,7 +365,7 @@ func (k *Cluster) LoadImages(ctx context.Context, images []ContainerImage) error
}()
for i := 0; i <= maxRetries; i++ {
out, err := k.combinedOutput(k.kindLoadDockerImageCmd(ctx, k.Name, img.Repo, img.Tag, tmpDir))
out, err := k.CombinedOutput(k.kindLoadDockerImageCmd(ctx, k.Name, img.Repo, img.Tag, tmpDir))
out = strings.TrimSpace(out)
@@ -256,7 +385,7 @@ func (k *Cluster) LoadImages(ctx context.Context, images []ContainerImage) error
return nil
}
func (k *Cluster) kindLoadDockerImageCmd(ctx context.Context, cluster, repo, tag, tmpDir string) *exec.Cmd {
func (k *Kind) kindLoadDockerImageCmd(ctx context.Context, cluster, repo, tag, tmpDir string) *exec.Cmd {
cmd := exec.CommandContext(ctx, "kind", "--loglevel=trace", "load", "docker-image", repo+":"+tag, "--name", cluster)
cmd.Env = os.Environ()
// Set TMPDIR to somewhere under $HOME when you use docker installed with Ubuntu snap
@@ -271,9 +400,9 @@ func (k *Cluster) kindLoadDockerImageCmd(ctx context.Context, cluster, repo, tag
return cmd
}
func (k *Cluster) PullImages(ctx context.Context, images []ContainerImage) error {
func (k *Kind) PullImages(ctx context.Context, images []ContainerImage) error {
for _, img := range images {
_, err := k.combinedOutput(k.dockerPullCmd(ctx, img.Repo, img.Tag))
_, err := k.CombinedOutput(k.dockerPullCmd(ctx, img.Repo, img.Tag))
if err != nil {
return err
}
@@ -282,11 +411,11 @@ func (k *Cluster) PullImages(ctx context.Context, images []ContainerImage) error
return nil
}
func (k *Cluster) dockerPullCmd(ctx context.Context, repo, tag string) *exec.Cmd {
func (k *Kind) dockerPullCmd(ctx context.Context, repo, tag string) *exec.Cmd {
return exec.CommandContext(ctx, "docker", "pull", repo+":"+tag)
}
func (k *Cluster) Stop(ctx context.Context) error {
func (k *Kind) Stop(ctx context.Context) error {
if err := k.kindDeleteCmd(ctx, k.Name).Run(); err != nil {
return err
}
@@ -294,11 +423,11 @@ func (k *Cluster) Stop(ctx context.Context) error {
return nil
}
func (k *Cluster) kindDeleteCmd(ctx context.Context, cluster string) *exec.Cmd {
func (k *Kind) kindDeleteCmd(ctx context.Context, cluster string) *exec.Cmd {
return exec.CommandContext(ctx, "kind", "delete", "cluster", "--name", cluster)
}
func (k *Cluster) writeKubeconfig(ctx context.Context) error {
func (k *Kind) writeKubeconfig(ctx context.Context) error {
var err error
k.kubeconfig, err = filepath.Abs(filepath.Join(k.Dir, "kubeconfig"))
@@ -313,147 +442,10 @@ func (k *Cluster) writeKubeconfig(ctx context.Context) error {
return nil
}
func (k *Cluster) kindExportKubeconfigCmd(ctx context.Context, cluster, path string) *exec.Cmd {
func (k *Kind) kindExportKubeconfigCmd(ctx context.Context, cluster, path string) *exec.Cmd {
cmd := exec.CommandContext(ctx, "kind", "export", "kubeconfig", "--name", cluster)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, "KUBECONFIG="+path)
return cmd
}
// KubectlConfig carries per-invocation options for kubectl commands
// issued by the test harness.
type KubectlConfig struct {
	// Env is extra environment entries (KEY=VALUE) appended to the
	// inherited process environment.
	Env []string
	// NoValidate adds --validate=false to the kubectl command.
	NoValidate bool
	// Timeout, when positive, is passed as kubectl's --timeout flag.
	Timeout time.Duration
	// Namespace, when non-empty, is passed via -n=<namespace>.
	Namespace string
}
// WithTimeout returns a copy of the config with Timeout set to d.
func (k KubectlConfig) WithTimeout(d time.Duration) KubectlConfig {
	k.Timeout = d
	return k
}
// RunKubectlEnsureNS makes sure the namespace name exists: it first
// tries `kubectl get ns` and, only if that fails, runs `kubectl create
// ns`. NOTE(review): any get error — e.g. a connection problem, not
// just "not found" — triggers the create attempt; the create error is
// then the one reported.
func (k *Cluster) RunKubectlEnsureNS(ctx context.Context, name string, cfg KubectlConfig) error {
	if _, err := k.combinedOutput(k.kubectlCmd(ctx, "get", []string{"ns", name}, cfg)); err != nil {
		if _, err := k.combinedOutput(k.kubectlCmd(ctx, "create", []string{"ns", name}, cfg)); err != nil {
			return err
		}
	}
	return nil
}
// GetClusterRoleBinding fetches the named clusterrolebinding via
// `kubectl get` and returns the raw command output.
func (k *Cluster) GetClusterRoleBinding(ctx context.Context, name string, cfg KubectlConfig) (string, error) {
	cmd := k.kubectlCmd(ctx, "get", []string{"clusterrolebinding", name}, cfg)
	out, err := k.combinedOutput(cmd)
	if err != nil {
		return "", err
	}
	return out, nil
}
// CreateClusterRoleBindingServiceAccount creates a clusterrolebinding
// named name that binds clusterrole to the service account sa.
func (k *Cluster) CreateClusterRoleBindingServiceAccount(ctx context.Context, name string, clusterrole string, sa string, cfg KubectlConfig) error {
	createArgs := []string{"clusterrolebinding", name, "--clusterrole=" + clusterrole, "--serviceaccount=" + sa}
	_, err := k.combinedOutput(k.kubectlCmd(ctx, "create", createArgs, cfg))
	return err
}
// GetCMLiterals reads the named ConfigMap as JSON via `kubectl get` and
// returns its .data map.
func (k *Cluster) GetCMLiterals(ctx context.Context, name string, cfg KubectlConfig) (map[string]string, error) {
	out, err := k.combinedOutput(k.kubectlCmd(ctx, "get", []string{"cm", name, "-o=json"}, cfg))
	if err != nil {
		return nil, err
	}

	// Only the data section of the ConfigMap is of interest here.
	var cm struct {
		Data map[string]string `json:"data"`
	}
	if err := json.Unmarshal([]byte(out), &cm); err != nil {
		k.errorf("Failed unmarshalling this data to JSON:\n%s\n", out)
		return nil, fmt.Errorf("unmarshalling json: %w", err)
	}

	return cm.Data, nil
}
// CreateCMLiterals creates a ConfigMap named name whose data is the
// given literal key/value pairs, via `kubectl create cm
// --from-literal=...`. Note that map iteration order is random, so the
// argument order varies between runs (kubectl does not care).
func (k *Cluster) CreateCMLiterals(ctx context.Context, name string, literals map[string]string, cfg KubectlConfig) error {
	args := []string{"cm", name}

	// Use distinct loop-variable names: the previous `for k, v := range`
	// shadowed the receiver k inside the loop body.
	for key, value := range literals {
		args = append(args, fmt.Sprintf("--from-literal=%s=%s", key, value))
	}

	if _, err := k.combinedOutput(k.kubectlCmd(ctx, "create", args, cfg)); err != nil {
		return err
	}

	return nil
}
// Apply runs `kubectl apply -f path`.
func (k *Cluster) Apply(ctx context.Context, path string, cfg KubectlConfig) error {
	cmd := k.kubectlCmd(ctx, "apply", []string{"-f", path}, cfg)
	_, err := k.combinedOutput(cmd)
	return err
}
// WaitUntilDeployAvailable blocks until deploy/name reports the
// Available condition (or kubectl gives up, e.g. per cfg.Timeout).
func (k *Cluster) WaitUntilDeployAvailable(ctx context.Context, name string, cfg KubectlConfig) error {
	cmd := k.kubectlCmd(ctx, "wait", []string{"deploy/" + name, "--for=condition=available"}, cfg)
	_, err := k.combinedOutput(cmd)
	return err
}
// kubectlCmd builds a kubectl command for subcommand c with the given
// args, applying the options from cfg: --validate=false, -n=<ns>, and
// --timeout, plus any extra environment entries appended to the
// inherited process environment.
func (k *Cluster) kubectlCmd(ctx context.Context, c string, args []string, cfg KubectlConfig) *exec.Cmd {
	args = append([]string{c}, args...)

	if cfg.NoValidate {
		args = append(args, "--validate=false")
	}

	if cfg.Namespace != "" {
		args = append(args, "-n="+cfg.Namespace)
	}

	if cfg.Timeout > 0 {
		// Duration.String renders e.g. "1m30s", which kubectl --timeout
		// accepts; the previous fmt.Sprintf("%s", d) was a redundant
		// round-trip (gosimple S1025).
		args = append(args, "--timeout="+cfg.Timeout.String())
	}

	cmd := exec.CommandContext(ctx, "kubectl", args...)
	cmd.Env = os.Environ()
	cmd.Env = append(cmd.Env, cfg.Env...)

	return cmd
}
// ScriptConfig carries options for running a shell script.
type ScriptConfig struct {
	// Env is extra environment entries (KEY=VALUE) appended to the
	// inherited process environment.
	Env []string
	// Dir is the working directory for the script.
	Dir string
}
// RunScript executes the bash script at path (resolved to an absolute
// path first) with the given config.
func (k *Cluster) RunScript(ctx context.Context, path string, cfg ScriptConfig) error {
	abs, err := filepath.Abs(path)
	if err != nil {
		return err
	}

	_, err = k.combinedOutput(k.bashRunScriptCmd(ctx, abs, cfg))
	return err
}
// bashRunScriptCmd builds a `bash <path>` command with cfg's extra
// environment appended and its working directory set from cfg.Dir.
func (k *Cluster) bashRunScriptCmd(ctx context.Context, path string, cfg ScriptConfig) *exec.Cmd {
	c := exec.CommandContext(ctx, "bash", path)
	c.Env = append(os.Environ(), cfg.Env...)
	c.Dir = cfg.Dir
	return c
}