Compare commits

...

14 Commits

Author SHA1 Message Date
Nikola Jokic
4de51ee6a5 random handle and random image name 2022-09-21 17:23:16 +02:00
Nikola Jokic
c8e272367f Merge branch 'main' into nikola-jokic/kaniko 2022-09-21 15:32:21 +02:00
Nikola Jokic
c4aa97c974 included generation of random handle/image 2022-09-21 15:29:39 +02:00
Nikola Jokic
f400db92cc Fixed invocation of registry. Basic run works hardcoded
Console logs are left in place and should be deleted
2022-09-21 13:54:25 +02:00
Nikola Jokic
5f0dc3f3b6 created base resource definitions for registry and kaniko 2022-09-21 10:39:04 +02:00
Thomas Boop
16eb238caa 0.1.3 release notes (#26) 2022-08-16 15:43:31 +02:00
Nikola Jokic
8e06496e34 fixing defaulting to docker hub on private registry, and b64 encoding (#25) 2022-08-16 09:30:58 -04:00
Nikola Jokic
6ef042836f fixing defaulting to docker hub on private registry, and b64 encoding 2022-07-29 13:27:17 +02:00
Thomas Boop
e2033b29c7 0.1.2 release (#22)
* 0.1.2 release

* trace the error and show a user readable message
2022-06-23 08:57:14 -04:00
Nikola Jokic
eb47baaf5e Adding more tests and minor changes in code (#21)
* added cleanup job checks, started testing constants file

* added getVolumeClaimName test

* added write entrypoint tests

* added tests around k8s utils

* fixed new regexp

* added tests around runner instance label

* 100% test coverage of constants
2022-06-22 14:15:42 -04:00
Nikola Jokic
20c19dae27 refactor around job claim name and runner instance labels (#20)
* refactor around job claim name, and runner instance labels

* repaired failing test
2022-06-22 09:32:50 -04:00
Thomas Boop
4307828719 Don't use JSON.stringify for errors (#19)
* better error handling

* remove unneeded catch

* Update index.ts
2022-06-22 15:20:48 +02:00
Thomas Boop
5c6995dba1 Add Akvelon to codeowners 2022-06-22 09:06:20 -04:00
Thomas Boop
bb1a033ed7 Make K8s claim name optional (#18)
* make claim name optional

* update version and notes

* fix ci

* correctly invoke function
2022-06-20 15:09:04 -04:00
20 changed files with 759 additions and 83 deletions

View File

@@ -1 +1 @@
* @actions/actions-runtime
* @actions/actions-runtime @actions/runner-akvelon

4 package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "hooks",
"version": "0.1.0",
"version": "0.1.3",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "hooks",
"version": "0.1.0",
"version": "0.1.3",
"license": "MIT",
"devDependencies": {
"@types/jest": "^27.5.1",

View File

@@ -1,6 +1,6 @@
{
"name": "hooks",
"version": "0.1.0",
"version": "0.1.3",
"description": "Three projects are included - k8s: a kubernetes hook implementation that spins up pods dynamically to run a job - docker: A hook implementation of the runner's docker implementation - A hook lib, which contains shared typescript definitions and utilities that the other packages consume",
"main": "",
"directories": {

View File

@@ -52,7 +52,9 @@ describe('run script step', () => {
definitions.runScriptStep.args.entryPoint = '/bin/bash'
definitions.runScriptStep.args.entryPointArgs = [
'-c',
`if [[ ! $(env | grep "^PATH=") = "PATH=${definitions.runScriptStep.args.prependPath}:"* ]]; then exit 1; fi`
`if [[ ! $(env | grep "^PATH=") = "PATH=${definitions.runScriptStep.args.prependPath.join(
':'
)}:"* ]]; then exit 1; fi`
]
await expect(
runScriptStep(definitions.runScriptStep.args, prepareJobResponse.state)

View File

@@ -32,7 +32,7 @@ rules:
- The `ACTIONS_RUNNER_POD_NAME` env should be set to the name of the pod
- The `ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER` env should be set to true to prevent the runner from running any jobs outside of a container
- The runner pod should map a persistent volume claim into the `_work` directory
- The `ACTIONS_RUNNER_CLAIM_NAME` env should be set to the persistent volume claim that contains the runner's working directory
- The `ACTIONS_RUNNER_CLAIM_NAME` env should be set to the persistent volume claim that contains the runner's working directory, otherwise it defaults to `${ACTIONS_RUNNER_POD_NAME}-work`
- Some actions runner env's are expected to be set. These are set automatically by the runner.
- `RUNNER_WORKSPACE` is expected to be set to the workspace of the runner
- `GITHUB_WORKSPACE` is expected to be set to the workspace of the job
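A minimal sketch of how a runner pod spec could surface these envs from TypeScript, with illustrative names that are not part of this diff; note that the claim name is optional after this change and falls back to `${ACTIONS_RUNNER_POD_NAME}-work`:
import * as k8s from '@kubernetes/client-node'
// Sketch only: function and pod/claim names are illustrative.
export function runnerPodEnv(podName: string, claimName?: string): k8s.V1EnvVar[] {
  const env: k8s.V1EnvVar[] = [
    { name: 'ACTIONS_RUNNER_POD_NAME', value: podName },
    { name: 'ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER', value: 'true' }
  ]
  if (claimName) {
    // Optional after this change; the hook falls back to `${podName}-work` when unset.
    env.push({ name: 'ACTIONS_RUNNER_CLAIM_NAME', value: claimName })
  }
  return env
}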

View File

@@ -27,9 +27,7 @@ export function getStepPodName(): string {
export function getVolumeClaimName(): string {
const name = process.env.ACTIONS_RUNNER_CLAIM_NAME
if (!name) {
throw new Error(
"'ACTIONS_RUNNER_CLAIM_NAME' is required, please contact your self hosted runner administrator"
)
return `${getRunnerPodName()}-work`
}
return name
}
@@ -41,14 +39,14 @@ export function getSecretName(): string {
)}-secret-${uuidv4().substring(0, STEP_POD_NAME_SUFFIX_LENGTH)}`
}
const MAX_POD_NAME_LENGTH = 63
const STEP_POD_NAME_SUFFIX_LENGTH = 8
export const MAX_POD_NAME_LENGTH = 63
export const STEP_POD_NAME_SUFFIX_LENGTH = 8
export const JOB_CONTAINER_NAME = 'job'
export class RunnerInstanceLabel {
runnerhook: string
private podName: string
constructor() {
this.runnerhook = process.env.ACTIONS_RUNNER_POD_NAME as string
this.podName = getRunnerPodName()
}
get key(): string {
@@ -56,10 +54,10 @@ export class RunnerInstanceLabel {
}
get value(): string {
return this.runnerhook
return this.podName
}
toString(): string {
return `runner-pod=${this.runnerhook}`
return `runner-pod=${this.podName}`
}
}
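For context, a short sketch (not part of the diff) of how this label is meant to be consumed: it becomes a Kubernetes label selector so cleanup can find every pod and secret the hook created for this runner, mirroring the cleanup tests further down. The namespace argument here is illustrative.
import * as k8s from '@kubernetes/client-node'
import { RunnerInstanceLabel } from '../hooks/constants'
async function listRunnerLinkedPods(ns: string): Promise<k8s.V1Pod[]> {
  const kc = new k8s.KubeConfig()
  kc.loadFromDefault()
  const api = kc.makeApiClient(k8s.CoreV1Api)
  // The four undefineds skip pretty, allowWatchBookmarks, _continue and
  // fieldSelector; the label selector ('runner-pod=<pod name>') comes next.
  const res = await api.listNamespacedPod(
    ns,
    undefined,
    undefined,
    undefined,
    undefined,
    new RunnerInstanceLabel().toString()
  )
  return res.body.items
}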

View File

@@ -46,10 +46,10 @@ export async function prepareJob(
}
let createdPod: k8s.V1Pod | undefined = undefined
try {
createdPod = await createPod(container, services, args.registry)
createdPod = await createPod(container, services, args.container.registry)
} catch (err) {
await prunePods()
throw new Error(`failed to create job pod: ${JSON.stringify(err)}`)
throw new Error(`failed to create job pod: ${err}`)
}
if (!createdPod?.metadata?.name) {

View File

@@ -1,4 +1,5 @@
import * as core from '@actions/core'
import { v4 as uuidv4 } from 'uuid'
import * as k8s from '@kubernetes/client-node'
import { RunContainerStepArgs } from 'hooklib'
import {
@@ -8,7 +9,8 @@ import {
getPodLogs,
getPodStatus,
waitForJobToComplete,
waitForPodPhases
waitForPodPhases,
containerBuild
} from '../k8s'
import {
containerVolumes,
@@ -23,6 +25,8 @@ export async function runContainerStep(
stepContainer: RunContainerStepArgs
): Promise<number> {
if (stepContainer.dockerfile) {
const imagePath = `${generateBuildHandle()}/${generateBuildTag()}`
await containerBuild(stepContainer, imagePath)
throw new Error('Building container actions is not currently supported')
}
@@ -108,3 +112,20 @@ function createPodSpec(
return podContainer
}
function generateBuildTag(): string {
return `${generateRandomString()}:${uuidv4().substring(0, 6)}`
}
function generateBuildHandle(): string {
return generateRandomString()
}
function generateRandomString(length = 10): string {
let v = ''
const chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
for (let i = 0; i < length; i++) {
v += chars.charAt(Math.floor(Math.random() * chars.length))
}
return v
}
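To make the shape of the generated reference concrete, a hypothetical example (both parts are random, so the exact strings below are invented):
const imagePath = `${generateBuildHandle()}/${generateBuildTag()}`
// e.g. 'AbCdEfGhIj/KlMnOpQrSt:3f2a1b', which kanikoPod() then pushes as
// docker-registry.default.svc.cluster.local:5000/AbCdEfGhIj/KlMnOpQrSt:3f2a1b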

View File

@@ -28,7 +28,7 @@ export async function runScriptStep(
JOB_CONTAINER_NAME
)
} catch (err) {
throw new Error(`failed to run script step: ${JSON.stringify(err)}`)
throw new Error(`failed to run script step: ${err}`)
} finally {
fs.rmSync(runnerPath)
}

View File

@@ -22,7 +22,7 @@ async function run(): Promise<void> {
throw new Error(
`The Service account needs the following permissions ${JSON.stringify(
requiredPermissions
)} on the pod resource in the '${namespace}' namespace. Please contact your self hosted runner administrator.`
)} on the pod resource in the '${namespace()}' namespace. Please contact your self hosted runner administrator.`
)
}
switch (command) {

View File

@@ -1,5 +1,6 @@
import * as core from '@actions/core'
import * as k8s from '@kubernetes/client-node'
import { ContainerInfo, Registry } from 'hooklib'
import { RunContainerStepArgs, ContainerInfo, Registry } from 'hooklib'
import * as stream from 'stream'
import {
getJobPodName,
@@ -9,6 +10,13 @@ import {
getVolumeClaimName,
RunnerInstanceLabel
} from '../hooks/constants'
import {
registryConfigMap,
registrySecret,
registryStatefulSet,
registryService,
kanikoPod
} from './kaniko'
import { PodPhase } from './utils'
const kc = new k8s.KubeConfig()
@@ -17,6 +25,7 @@ kc.loadFromDefault()
const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
const k8sBatchV1Api = kc.makeApiClient(k8s.BatchV1Api)
const k8sAppsV1 = kc.makeApiClient(k8s.AppsV1Api)
const k8sAuthorizationV1Api = kc.makeApiClient(k8s.AuthorizationV1Api)
export const POD_VOLUME_NAME = 'work'
@@ -51,6 +60,12 @@ export const requiredPermissions = [
verbs: ['create', 'delete', 'get', 'list'],
resource: 'secrets',
subresource: ''
},
{
group: '',
verbs: ['create', 'delete', 'get', 'list'],
resource: 'configmaps',
subresource: ''
}
]
@@ -109,13 +124,14 @@ export async function createPod(
export async function createJob(
container: k8s.V1Container
): Promise<k8s.V1Job> {
const job = new k8s.V1Job()
const runnerInstanceLabel = new RunnerInstanceLabel()
const job = new k8s.V1Job()
job.apiVersion = 'batch/v1'
job.kind = 'Job'
job.metadata = new k8s.V1ObjectMeta()
job.metadata.name = getStepPodName()
job.metadata.labels = { 'runner-pod': getRunnerPodName() }
job.metadata.labels = { [runnerInstanceLabel.key]: runnerInstanceLabel.value }
job.spec = new k8s.V1JobSpec()
job.spec.ttlSecondsAfterFinished = 300
@@ -127,7 +143,7 @@ export async function createJob(
job.spec.template.spec.restartPolicy = 'Never'
job.spec.template.spec.nodeName = await getCurrentNodeName()
const claimName = `${runnerName()}-work`
const claimName = getVolumeClaimName()
job.spec.template.spec.volumes = [
{
name: 'work',
@@ -185,33 +201,30 @@ export async function execPodStep(
): Promise<void> {
const exec = new k8s.Exec(kc)
await new Promise(async function (resolve, reject) {
try {
await exec.exec(
namespace(),
podName,
containerName,
command,
process.stdout,
process.stderr,
stdin ?? null,
false /* tty */,
resp => {
// kube.exec returns an error if exit code is not 0, but we can't actually get the exit code
if (resp.status === 'Success') {
resolve(resp.code)
} else {
reject(
JSON.stringify({
message: resp?.message,
details: resp?.details
})
)
}
await exec.exec(
namespace(),
podName,
containerName,
command,
process.stdout,
process.stderr,
stdin ?? null,
false /* tty */,
resp => {
// kube.exec returns an error if exit code is not 0, but we can't actually get the exit code
if (resp.status === 'Success') {
resolve(resp.code)
} else {
core.debug(
JSON.stringify({
message: resp?.message,
details: resp?.details
})
)
reject(resp?.message)
}
)
} catch (error) {
reject(JSON.stringify(error))
}
}
)
})
}
@@ -234,29 +247,34 @@ export async function createDockerSecret(
): Promise<k8s.V1Secret> {
const authContent = {
auths: {
[registry.serverUrl]: {
[registry.serverUrl || 'https://index.docker.io/v1/']: {
username: registry.username,
password: registry.password,
auth: Buffer.from(
`${registry.username}:${registry.password}`,
auth: Buffer.from(`${registry.username}:${registry.password}`).toString(
'base64'
).toString()
)
}
}
}
const runnerInstanceLabel = new RunnerInstanceLabel()
const secretName = getSecretName()
const secret = new k8s.V1Secret()
secret.immutable = true
secret.apiVersion = 'v1'
secret.metadata = new k8s.V1ObjectMeta()
secret.metadata.name = secretName
secret.metadata.labels = { 'runner-pod': getRunnerPodName() }
secret.metadata.namespace = namespace()
secret.metadata.labels = {
[runnerInstanceLabel.key]: runnerInstanceLabel.value
}
secret.type = 'kubernetes.io/dockerconfigjson'
secret.kind = 'Secret'
secret.data = {
'.dockerconfigjson': Buffer.from(
JSON.stringify(authContent),
'.dockerconfigjson': Buffer.from(JSON.stringify(authContent)).toString(
'base64'
).toString()
)
}
const { body } = await k8sApi.createNamespacedSecret(namespace(), secret)
@@ -266,13 +284,18 @@ export async function createDockerSecret(
export async function createSecretForEnvs(envs: {
[key: string]: string
}): Promise<string> {
const runnerInstanceLabel = new RunnerInstanceLabel()
const secret = new k8s.V1Secret()
const secretName = getSecretName()
secret.immutable = true
secret.apiVersion = 'v1'
secret.metadata = new k8s.V1ObjectMeta()
secret.metadata.name = secretName
secret.metadata.labels = { 'runner-pod': getRunnerPodName() }
secret.metadata.labels = {
[runnerInstanceLabel.key]: runnerInstanceLabel.value
}
secret.kind = 'Secret'
secret.data = {}
for (const [key, value] of Object.entries(envs)) {
@@ -317,7 +340,14 @@ export async function waitForPodPhases(
let phase: PodPhase = PodPhase.UNKNOWN
try {
while (true) {
phase = await getPodPhase(podName)
try {
phase = await getPodPhase(podName)
} catch (err) {
const e = err as k8s.HttpError
if (e?.body?.reason === 'NotFound') {
phase = PodPhase.UNKNOWN
}
}
if (awaitingPhases.has(phase)) {
return
}
@@ -372,7 +402,7 @@ export async function getPodLogs(
})
logStream.on('error', err => {
process.stderr.write(JSON.stringify(err))
process.stderr.write(err.message)
})
const r = await log.log(namespace(), podName, containerName, logStream, {
@@ -455,6 +485,45 @@ export async function isPodContainerAlpine(
return isAlpine
}
export async function containerBuild(
args: RunContainerStepArgs,
imagePath: string
): Promise<void> {
const cm = registryConfigMap()
const secret = registrySecret()
const ss = registryStatefulSet()
const svc = registryService()
const pod = kanikoPod(args.workingDirectory, imagePath)
await Promise.all([
k8sApi.createNamespacedConfigMap(namespace(), cm),
k8sApi.createNamespacedSecret(namespace(), secret)
])
try {
await k8sAppsV1.createNamespacedStatefulSet(namespace(), ss)
await waitForPodPhases(
'docker-registry-0',
new Set([PodPhase.RUNNING]),
new Set([PodPhase.PENDING, PodPhase.UNKNOWN])
)
} catch (err) {
console.log(err)
console.log(JSON.stringify(err))
throw err
}
try {
await k8sApi.createNamespacedService(namespace(), svc)
} catch (err) {
console.log(JSON.stringify(err))
throw err
}
try {
await k8sApi.createNamespacedPod(namespace(), pod)
} catch (err) {
console.log(JSON.stringify(err))
throw err
}
}
async function getCurrentNodeName(): Promise<string> {
const resp = await k8sApi.readNamespacedPod(getRunnerPodName(), namespace())
@@ -478,16 +547,6 @@ export function namespace(): string {
return context.namespace
}
function runnerName(): string {
const name = process.env.ACTIONS_RUNNER_POD_NAME
if (!name) {
throw new Error(
'Failed to determine runner name. "ACTIONS_RUNNER_POD_NAME" env variables should be set.'
)
}
return name
}
class BackOffManager {
private backOffSeconds = 1
totalTime = 0
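Since configmaps were added to requiredPermissions, the service account's Role now needs to grant them too. A minimal sketch of such a Role built from requiredPermissions; the Role name and import path are assumptions, not part of this change:
import * as k8s from '@kubernetes/client-node'
import { requiredPermissions } from './index'
export function hookRole(ns: string): k8s.V1Role {
  const role = new k8s.V1Role()
  role.apiVersion = 'rbac.authorization.k8s.io/v1'
  role.kind = 'Role'
  role.metadata = new k8s.V1ObjectMeta()
  role.metadata.name = 'runner-container-hooks' // illustrative name
  role.metadata.namespace = ns
  // One RBAC rule per required permission entry; pods/exec style subresources
  // are folded into the resource name.
  role.rules = requiredPermissions.map(p => ({
    apiGroups: [p.group],
    resources: [p.subresource ? `${p.resource}/${p.subresource}` : p.resource],
    verbs: p.verbs
  }))
  return role
}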

View File

@@ -0,0 +1,208 @@
import * as k8s from '@kubernetes/client-node'
const REGISTRY_CONFIG_MAP_YAML = `
storage:
filesystem:
rootdirectory: /var/lib/registry
maxthreads: 100
health:
storagedriver:
enabled: true
interval: 10s
threshold: 3
http:
addr: :5000
headers:
X-Content-Type-Options:
- nosniff
log:
fields:
service: registry
storage:
cache:
blobdescriptor: inmemory
version: 0.1
`.trim()
export function registryConfigMap(): k8s.V1ConfigMap {
const cm = new k8s.V1ConfigMap()
cm.apiVersion = 'v1'
cm.data = {
'config.yaml': REGISTRY_CONFIG_MAP_YAML
}
cm.kind = 'ConfigMap'
cm.metadata = new k8s.V1ObjectMeta()
cm.metadata.labels = { app: 'docker-registry' }
cm.metadata.name = 'docker-registry-config'
// TODO: make this configurable
return cm
}
export function registrySecret(): k8s.V1Secret {
const secret = new k8s.V1Secret()
secret.apiVersion = 'v1'
secret.data = { haSharedSecret: 'U29tZVZlcnlTdHJpbmdTZWNyZXQK' }
secret.kind = 'Secret'
secret.metadata = new k8s.V1ObjectMeta()
secret.metadata.labels = {
app: 'docker-registry',
chart: 'docker-registry-1.4.3'
}
secret.metadata.name = 'docker-registry-secret'
secret.type = 'Opaque'
return secret
}
export function registryStatefulSet(): k8s.V1StatefulSet {
const ss = new k8s.V1StatefulSet()
ss.apiVersion = 'apps/v1'
ss.metadata = new k8s.V1ObjectMeta()
ss.metadata.name = 'docker-registry'
const spec = new k8s.V1StatefulSetSpec()
spec.selector = new k8s.V1LabelSelector()
spec.selector.matchLabels = { app: 'docker-registry' }
spec.serviceName = 'registry'
spec.replicas = 1
const tmpl = new k8s.V1PodTemplateSpec()
tmpl.metadata = new k8s.V1ObjectMeta()
tmpl.metadata.labels = { app: 'docker-registry' }
tmpl.spec = new k8s.V1PodSpec()
tmpl.spec.terminationGracePeriodSeconds = 5 // TODO: figure out for how long
const c = new k8s.V1Container()
c.command = ['/bin/registry', 'serve', '/etc/docker/registry/config.yaml']
c.env = [
{
name: 'REGISTRY_HTTP_SECRET',
valueFrom: {
secretKeyRef: {
key: 'haSharedSecret',
name: 'docker-registry-secret'
}
}
},
{
name: 'REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY',
value: '/var/lib/registry'
}
]
c.image = 'registry:2.6.2'
c.name = 'docker-registry'
c.imagePullPolicy = 'IfNotPresent'
c.ports = [
{
containerPort: 5000,
protocol: 'TCP'
}
]
c.volumeMounts = [
{
mountPath: '/etc/docker/registry',
name: 'docker-registry-config'
}
]
c.livenessProbe = new k8s.V1Probe()
c.livenessProbe.failureThreshold = 3
c.livenessProbe.periodSeconds = 10
c.livenessProbe.successThreshold = 1
c.livenessProbe.timeoutSeconds = 1
c.livenessProbe.httpGet = new k8s.V1HTTPGetAction()
c.livenessProbe.httpGet.path = '/'
c.livenessProbe.httpGet.port = 5000
c.livenessProbe.httpGet.scheme = 'HTTP'
c.readinessProbe = new k8s.V1Probe()
c.readinessProbe.failureThreshold = 3
c.readinessProbe.periodSeconds = 10
c.readinessProbe.successThreshold = 1
c.readinessProbe.timeoutSeconds = 1
c.readinessProbe.httpGet = new k8s.V1HTTPGetAction()
c.readinessProbe.httpGet.path = '/'
c.readinessProbe.httpGet.port = 5000
c.readinessProbe.httpGet.scheme = 'HTTP'
tmpl.spec.containers = [c]
tmpl.spec.volumes = [
{
name: 'docker-registry-config',
configMap: {
name: 'docker-registry-config'
}
}
]
spec.template = tmpl
ss.spec = spec
return ss
}
export function registryService(): k8s.V1Service {
const svc = new k8s.V1Service()
svc.apiVersion = 'v1'
svc.kind = 'Service'
svc.metadata = new k8s.V1ObjectMeta()
svc.metadata.name = 'docker-registry'
svc.metadata.labels = {
app: 'docker-registry'
}
const spec = new k8s.V1ServiceSpec()
spec.externalTrafficPolicy = 'Cluster'
spec.ports = [
{
name: 'registry',
nodePort: 31500,
port: 5000,
protocol: 'TCP',
targetPort: 5000
}
]
spec.selector = {
app: 'docker-registry'
}
spec.sessionAffinity = 'None'
spec.type = 'NodePort'
svc.spec = spec
return svc
}
export function kanikoPod(
workingDirectory: string, // git://github.com/<handle>/<repo>
imagePath: string // <handle>/<image>:<tag>
): k8s.V1Pod {
const pod = new k8s.V1Pod()
pod.apiVersion = 'v1'
pod.kind = 'Pod'
pod.metadata = new k8s.V1ObjectMeta()
pod.metadata.name = 'kaniko'
const spec = new k8s.V1PodSpec()
const c = new k8s.V1Container()
c.image = 'gcr.io/kaniko-project/executor:latest'
c.name = 'kaniko'
c.imagePullPolicy = 'Always'
c.env = [
{
name: 'GIT_TOKEN',
value: process.env.GITHUB_TOKEN
}
]
c.args = [
'--dockerfile=Dockerfile',
`--context=${workingDirectory}`,
`--destination=docker-registry.default.svc.cluster.local:5000/${imagePath}`
]
spec.containers = [c]
spec.dnsPolicy = 'ClusterFirst'
spec.restartPolicy = 'Never'
pod.spec = spec
return pod
}
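Once the kaniko pod completes, one way to sanity-check that the push reached the in-cluster registry is the Docker Registry HTTP API's catalog endpoint. A sketch, assuming the hard-coded default namespace used in kanikoPod above and a Node 18+ runtime with global fetch:
async function listPushedRepositories(): Promise<string[]> {
  // GET /v2/_catalog returns { repositories: string[] } for a v2 registry.
  const res = await fetch(
    'http://docker-registry.default.svc.cluster.local:5000/v2/_catalog'
  )
  const body = (await res.json()) as { repositories: string[] }
  return body.repositories
}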

View File

@@ -20,18 +20,18 @@ export function containerVolumes(
}
]
const workspacePath = process.env.GITHUB_WORKSPACE as string
if (containerAction) {
const workspace = process.env.GITHUB_WORKSPACE as string
mounts.push(
{
name: POD_VOLUME_NAME,
mountPath: '/github/workspace',
subPath: workspace.substring(workspace.indexOf('work/') + 1)
subPath: workspacePath.substring(workspacePath.indexOf('work/') + 1)
},
{
name: POD_VOLUME_NAME,
mountPath: '/github/file_commands',
subPath: workspace.substring(workspace.indexOf('work/') + 1)
subPath: workspacePath.substring(workspacePath.indexOf('work/') + 1)
}
)
return mounts
@@ -63,7 +63,6 @@ export function containerVolumes(
return mounts
}
const workspacePath = process.env.GITHUB_WORKSPACE as string
for (const userVolume of userMountVolumes) {
let sourceVolumePath = ''
if (path.isAbsolute(userVolume.sourceVolumePath)) {

View File

@@ -0,0 +1,20 @@
import { containerBuild } from '../src/k8s'
jest.useRealTimers()
describe('container build', () => {
beforeAll(async () => {
process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE'] = 'default'
})
it('should finish without throwing an exception', async () => {
await expect(
containerBuild(
{
workingDirectory: 'git://github.com/nikola-jokic/dockeraction.git'
},
'randhandle/randimg:123123'
)
).resolves.not.toThrow()
})
})

View File

@@ -1,4 +1,7 @@
import * as k8s from '@kubernetes/client-node'
import { cleanupJob, prepareJob } from '../src/hooks'
import { RunnerInstanceLabel } from '../src/hooks/constants'
import { namespace } from '../src/k8s'
import { TestHelper } from './test-setup'
let testHelper: TestHelper
@@ -13,10 +16,50 @@ describe('Cleanup Job', () => {
)
await prepareJob(prepareJobData.args, prepareJobOutputFilePath)
})
it('should not throw', async () => {
await expect(cleanupJob()).resolves.not.toThrow()
})
afterEach(async () => {
await testHelper.cleanup()
})
it('should not throw', async () => {
await expect(cleanupJob()).resolves.not.toThrow()
})
it('should have no runner linked pods running', async () => {
await cleanupJob()
const kc = new k8s.KubeConfig()
kc.loadFromDefault()
const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
const podList = await k8sApi.listNamespacedPod(
namespace(),
undefined,
undefined,
undefined,
undefined,
new RunnerInstanceLabel().toString()
)
expect(podList.body.items.length).toBe(0)
})
it('should have no runner linked secrets', async () => {
await cleanupJob()
const kc = new k8s.KubeConfig()
kc.loadFromDefault()
const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
const secretList = await k8sApi.listNamespacedSecret(
namespace(),
undefined,
undefined,
undefined,
undefined,
new RunnerInstanceLabel().toString()
)
expect(secretList.body.items.length).toBe(0)
})
})

View File

@@ -0,0 +1,173 @@
import {
getJobPodName,
getRunnerPodName,
getSecretName,
getStepPodName,
getVolumeClaimName,
MAX_POD_NAME_LENGTH,
RunnerInstanceLabel,
STEP_POD_NAME_SUFFIX_LENGTH
} from '../src/hooks/constants'
describe('constants', () => {
describe('runner instance label', () => {
beforeEach(() => {
process.env.ACTIONS_RUNNER_POD_NAME = 'example'
})
it('should throw if ACTIONS_RUNNER_POD_NAME env is not set', () => {
delete process.env.ACTIONS_RUNNER_POD_NAME
expect(() => new RunnerInstanceLabel()).toThrow()
})
it('should have key truthy', () => {
const runnerInstanceLabel = new RunnerInstanceLabel()
expect(typeof runnerInstanceLabel.key).toBe('string')
expect(runnerInstanceLabel.key).toBeTruthy()
expect(runnerInstanceLabel.key.length).toBeGreaterThan(0)
})
it('should have value as runner pod name', () => {
const name = process.env.ACTIONS_RUNNER_POD_NAME as string
const runnerInstanceLabel = new RunnerInstanceLabel()
expect(typeof runnerInstanceLabel.value).toBe('string')
expect(runnerInstanceLabel.value).toBe(name)
})
it('should have toString combination of key and value', () => {
const runnerInstanceLabel = new RunnerInstanceLabel()
expect(runnerInstanceLabel.toString()).toBe(
`${runnerInstanceLabel.key}=${runnerInstanceLabel.value}`
)
})
})
describe('getRunnerPodName', () => {
it('should throw if ACTIONS_RUNNER_POD_NAME env is not set', () => {
delete process.env.ACTIONS_RUNNER_POD_NAME
expect(() => getRunnerPodName()).toThrow()
process.env.ACTIONS_RUNNER_POD_NAME = ''
expect(() => getRunnerPodName()).toThrow()
})
it('should return correct ACTIONS_RUNNER_POD_NAME name', () => {
const name = 'example'
process.env.ACTIONS_RUNNER_POD_NAME = name
expect(getRunnerPodName()).toBe(name)
})
})
describe('getJobPodName', () => {
it('should throw on getJobPodName if ACTIONS_RUNNER_POD_NAME env is not set', () => {
delete process.env.ACTIONS_RUNNER_POD_NAME
expect(() => getJobPodName()).toThrow()
process.env.ACTIONS_RUNNER_POD_NAME = ''
expect(() => getRunnerPodName()).toThrow()
})
it('should contain suffix -workflow', () => {
const tableTests = [
{
podName: 'test',
expect: 'test-workflow'
},
{
// podName.length == 63
podName:
'abcdaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
expect:
'abcdaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-workflow'
}
]
for (const tt of tableTests) {
process.env.ACTIONS_RUNNER_POD_NAME = tt.podName
const actual = getJobPodName()
expect(actual).toBe(tt.expect)
}
})
})
describe('getVolumeClaimName', () => {
it('should throw if ACTIONS_RUNNER_POD_NAME env is not set', () => {
delete process.env.ACTIONS_RUNNER_CLAIM_NAME
delete process.env.ACTIONS_RUNNER_POD_NAME
expect(() => getVolumeClaimName()).toThrow()
process.env.ACTIONS_RUNNER_POD_NAME = ''
expect(() => getVolumeClaimName()).toThrow()
})
it('should return ACTIONS_RUNNER_CLAIM_NAME env if set', () => {
const claimName = 'testclaim'
process.env.ACTIONS_RUNNER_CLAIM_NAME = claimName
process.env.ACTIONS_RUNNER_POD_NAME = 'example'
expect(getVolumeClaimName()).toBe(claimName)
})
it('should contain suffix -work if ACTIONS_RUNNER_CLAIM_NAME is not set', () => {
delete process.env.ACTIONS_RUNNER_CLAIM_NAME
process.env.ACTIONS_RUNNER_POD_NAME = 'example'
expect(getVolumeClaimName()).toBe('example-work')
})
})
describe('getSecretName', () => {
it('should throw if ACTIONS_RUNNER_POD_NAME env is not set', () => {
delete process.env.ACTIONS_RUNNER_POD_NAME
expect(() => getSecretName()).toThrow()
process.env.ACTIONS_RUNNER_POD_NAME = ''
expect(() => getSecretName()).toThrow()
})
it('should contain suffix -secret- and name trimmed', () => {
const podNames = [
'test',
'abcdaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
]
for (const podName of podNames) {
process.env.ACTIONS_RUNNER_POD_NAME = podName
const actual = getSecretName()
const re = new RegExp(
`${podName.substring(
MAX_POD_NAME_LENGTH -
'-secret-'.length -
STEP_POD_NAME_SUFFIX_LENGTH
)}-secret-[a-z0-9]{8,}`
)
expect(actual).toMatch(re)
}
})
})
describe('getStepPodName', () => {
it('should throw if ACTIONS_RUNNER_POD_NAME env is not set', () => {
delete process.env.ACTIONS_RUNNER_POD_NAME
expect(() => getStepPodName()).toThrow()
process.env.ACTIONS_RUNNER_POD_NAME = ''
expect(() => getStepPodName()).toThrow()
})
it('should contain suffix -step- and name trimmed', () => {
const podNames = [
'test',
'abcdaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
]
for (const podName of podNames) {
process.env.ACTIONS_RUNNER_POD_NAME = podName
const actual = getStepPodName()
const re = new RegExp(
`${podName.substring(
MAX_POD_NAME_LENGTH - '-step-'.length - STEP_POD_NAME_SUFFIX_LENGTH
)}-step-[a-z0-9]{8,}`
)
expect(actual).toMatch(re)
}
})
})
})

View File

@@ -0,0 +1,153 @@
import * as fs from 'fs'
import { POD_VOLUME_NAME } from '../src/k8s'
import { containerVolumes, writeEntryPointScript } from '../src/k8s/utils'
import { TestHelper } from './test-setup'
let testHelper: TestHelper
describe('k8s utils', () => {
describe('write entrypoint', () => {
beforeEach(async () => {
testHelper = new TestHelper()
await testHelper.initialize()
})
afterEach(async () => {
await testHelper.cleanup()
})
it('should not throw', () => {
expect(() =>
writeEntryPointScript(
'/test',
'sh',
['-e', 'script.sh'],
['/prepend/path'],
{
SOME_ENV: 'SOME_VALUE'
}
)
).not.toThrow()
})
it('should throw if RUNNER_TEMP is not set', () => {
delete process.env.RUNNER_TEMP
expect(() =>
writeEntryPointScript(
'/test',
'sh',
['-e', 'script.sh'],
['/prepend/path'],
{
SOME_ENV: 'SOME_VALUE'
}
)
).toThrow()
})
it('should return object with containerPath and runnerPath', () => {
const { containerPath, runnerPath } = writeEntryPointScript(
'/test',
'sh',
['-e', 'script.sh'],
['/prepend/path'],
{
SOME_ENV: 'SOME_VALUE'
}
)
expect(containerPath).toMatch(/\/__w\/_temp\/.*\.sh/)
const re = new RegExp(`${process.env.RUNNER_TEMP}/.*\\.sh`)
expect(runnerPath).toMatch(re)
})
it('should write entrypoint path and the file should exist', () => {
const { runnerPath } = writeEntryPointScript(
'/test',
'sh',
['-e', 'script.sh'],
['/prepend/path'],
{
SOME_ENV: 'SOME_VALUE'
}
)
expect(fs.existsSync(runnerPath)).toBe(true)
})
})
describe('container volumes', () => {
beforeEach(async () => {
testHelper = new TestHelper()
await testHelper.initialize()
})
afterEach(async () => {
await testHelper.cleanup()
})
it('should throw if container action and GITHUB_WORKSPACE env is not set', () => {
delete process.env.GITHUB_WORKSPACE
expect(() => containerVolumes([], true, true)).toThrow()
expect(() => containerVolumes([], false, true)).toThrow()
})
it('should always have work mount', () => {
let volumes = containerVolumes([], true, true)
expect(volumes.find(e => e.mountPath === '/__w')).toBeTruthy()
volumes = containerVolumes([], true, false)
expect(volumes.find(e => e.mountPath === '/__w')).toBeTruthy()
volumes = containerVolumes([], false, true)
expect(volumes.find(e => e.mountPath === '/__w')).toBeTruthy()
volumes = containerVolumes([], false, false)
expect(volumes.find(e => e.mountPath === '/__w')).toBeTruthy()
})
it('should have container action volumes', () => {
let volumes = containerVolumes([], true, true)
expect(
volumes.find(e => e.mountPath === '/github/workspace')
).toBeTruthy()
expect(
volumes.find(e => e.mountPath === '/github/file_commands')
).toBeTruthy()
volumes = containerVolumes([], false, true)
expect(
volumes.find(e => e.mountPath === '/github/workspace')
).toBeTruthy()
expect(
volumes.find(e => e.mountPath === '/github/file_commands')
).toBeTruthy()
})
it('should have externals, github home and github workflow mounts if job container', () => {
const volumes = containerVolumes()
expect(volumes.find(e => e.mountPath === '/__e')).toBeTruthy()
expect(volumes.find(e => e.mountPath === '/github/home')).toBeTruthy()
expect(volumes.find(e => e.mountPath === '/github/workflow')).toBeTruthy()
})
it('should throw if user volume source volume path is not in workspace', () => {
expect(() =>
containerVolumes(
[
{
sourceVolumePath: '/outside/of/workdir'
}
],
true,
false
)
).toThrow()
})
it(`all volumes should have name ${POD_VOLUME_NAME}`, () => {
let volumes = containerVolumes([], true, true)
expect(volumes.every(e => e.name === POD_VOLUME_NAME)).toBeTruthy()
volumes = containerVolumes([], true, false)
expect(volumes.every(e => e.name === POD_VOLUME_NAME)).toBeTruthy()
volumes = containerVolumes([], false, true)
expect(volumes.every(e => e.name === POD_VOLUME_NAME)).toBeTruthy()
volumes = containerVolumes([], false, false)
expect(volumes.every(e => e.name === POD_VOLUME_NAME)).toBeTruthy()
})
})
})

View File

@@ -94,7 +94,9 @@ describe('Run script step', () => {
runScriptStepDefinition.args.entryPoint = '/bin/bash'
runScriptStepDefinition.args.entryPointArgs = [
'-c',
`'if [[ ! $(env | grep "^PATH=") = "PATH=${runScriptStepDefinition.args.prependPath}:"* ]]; then exit 1; fi'`
`'if [[ ! $(env | grep "^PATH=") = "PATH=${runScriptStepDefinition.args.prependPath.join(
':'
)}:"* ]]; then exit 1; fi'`
]
await expect(

View File

@@ -21,7 +21,6 @@ export class TestHelper {
public async initialize(): Promise<void> {
process.env['ACTIONS_RUNNER_POD_NAME'] = `${this.podName}`
process.env['ACTIONS_RUNNER_CLAIM_NAME'] = `${this.podName}-work`
process.env['RUNNER_WORKSPACE'] = `${this.tempDirPath}/_work/repo`
process.env['RUNNER_TEMP'] = `${this.tempDirPath}/_work/_temp`
process.env['GITHUB_WORKSPACE'] = `${this.tempDirPath}/_work/repo/repo`
@@ -41,7 +40,7 @@ export class TestHelper {
await this.createTestVolume()
await this.createTestJobPod()
} catch (e) {
console.log(JSON.stringify(e))
console.log(e)
}
}

View File

@@ -1,7 +1,6 @@
## Features
- Initial Release
## Bugs
- Fixed an issue where default private registry images did not pull correctly [#25]
## Misc