Initial Commit

This commit is contained in:
Thomas Boop
2022-06-02 15:53:11 -04:00
parent 4c8cc497b3
commit 6159767f90
70 changed files with 30723 additions and 0 deletions

10
packages/docker/README.md Normal file
View File

@@ -0,0 +1,10 @@
# Docker Hooks
## Description
This implementation mirrors the original docker implementation in the [Actions Runner](https://github.com/actions/runner).
Feel free to fork this repository and modify it in order to customize that implementation.
## Pre-requisites
The `GITHUB_WORKSPACE` env will be set to the GitHub Workspace. This is done automatically by the Actions runner, but may need to be done manually when testing.
The Docker CLI must be installed on the machine, and Docker must be running.

View File

@@ -0,0 +1,13 @@
// eslint-disable-next-line import/no-commonjs
module.exports = {
clearMocks: true,
moduleFileExtensions: ['js', 'ts'],
testEnvironment: 'node',
testMatch: ['**/*-test.ts'],
testRunner: 'jest-circus/runner',
transform: {
'^.+\\.ts$': 'ts-jest'
},
setupFilesAfterEnv: ['./jest.setup.js'],
verbose: true
}

View File

@@ -0,0 +1 @@
// Allow up to 90 seconds per test: these tests pull images and start real
// docker containers, which is slow.
jest.setTimeout(90000)

9269
packages/docker/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,29 @@
{
"name": "dockerhooks",
"version": "0.1.0",
"description": "",
"main": "lib/index.js",
"scripts": {
"test": "jest --runInBand",
"build": "npx tsc && npx ncc build"
},
"author": "",
"license": "MIT",
"dependencies": {
"@actions/core": "^1.6.0",
"@actions/exec": "^1.1.1",
"hooklib": "file:../hooklib",
"uuid": "^8.3.2"
},
"devDependencies": {
"@types/jest": "^27.4.1",
"@types/node": "^17.0.23",
"@typescript-eslint/parser": "^5.18.0",
"@vercel/ncc": "^0.33.4",
"jest": "^27.5.1",
"ts-jest": "^27.1.4",
"ts-node": "^10.7.0",
"tsconfig-paths": "^3.14.1",
"typescript": "^4.6.3"
}
}

View File

@@ -0,0 +1,9 @@
/**
 * Derives a label unique to this runner from the RUNNER_NAME environment
 * variable, hex-encoded so it is always a valid docker label/tag component.
 * @throws Error when RUNNER_NAME is not set.
 */
export function getRunnerLabel(): string {
  const runnerName = process.env.RUNNER_NAME
  if (runnerName === undefined || runnerName === '') {
    throw new Error(
      "'RUNNER_NAME' env is required, please contact your self hosted runner administrator"
    )
  }
  return Buffer.from(runnerName).toString('hex')
}

View File

@@ -0,0 +1,413 @@
import * as core from '@actions/core'
import * as fs from 'fs'
import {
ContainerInfo,
JobContainerInfo,
RunContainerStepArgs,
ServiceContainerInfo,
StepContainerInfo
} from 'hooklib/lib'
import path from 'path'
import { env } from 'process'
import { v4 as uuidv4 } from 'uuid'
import { runDockerCommand, RunDockerCommandOptions } from '../utils'
import { getRunnerLabel } from './constants'
/**
 * Creates (but does not start) a docker container for a job, service, or
 * step, attached to the given network.
 * @param args container description (image, mounts, env, ports, ...)
 * @param name container name to assign
 * @param network docker network to attach to
 * @returns metadata (id, image, network, contextName) used by later hooks
 * @throws Error when no image is provided or docker prints no container id
 */
export async function createContainer(
  args: ContainerInfo,
  name: string,
  network: string
): Promise<ContainerMetadata> {
  if (!args.image) {
    throw new Error('Image was expected')
  }
  const dockerArgs: string[] = ['create']
  // Label with the runner label so leftover containers can be pruned later
  dockerArgs.push(`--label=${getRunnerLabel()}`)
  dockerArgs.push(`--network=${network}`)
  // Service containers are reachable on the job network by their context name
  if ((args as ServiceContainerInfo)?.contextName) {
    dockerArgs.push(
      `--network-alias=${(args as ServiceContainerInfo)?.contextName}`
    )
  }
  dockerArgs.push('--name', name)
  if (args?.portMappings?.length) {
    for (const portMapping of args.portMappings) {
      dockerArgs.push('-p', portMapping)
    }
  }
  if (args.createOptions) {
    // NOTE(review): naive split on spaces — quoted option values containing
    // spaces would be broken apart; confirm inputs never need them
    dockerArgs.push(...args.createOptions.split(' '))
  }
  if (args.environmentVariables) {
    for (const [key, value] of Object.entries(args.environmentVariables)) {
      dockerArgs.push('-e')
      // NOTE(review): the surrounding double quotes are passed literally to
      // docker (exec does not go through a shell) — verify this is intended
      if (!value) {
        dockerArgs.push(`"${key}"`)
      } else {
        dockerArgs.push(`"${key}=${value}"`)
      }
    }
  }
  // User-specified mounts plus the runner's required system mounts
  const mountVolumes = [
    ...(args.userMountVolumes || []),
    ...((args as JobContainerInfo | StepContainerInfo).systemMountVolumes || [])
  ]
  for (const mountVolume of mountVolumes) {
    dockerArgs.push(
      `-v=${mountVolume.sourceVolumePath}:${mountVolume.targetVolumePath}`
    )
  }
  if (args.entryPoint) {
    dockerArgs.push(`--entrypoint`)
    dockerArgs.push(args.entryPoint)
  }
  dockerArgs.push(args.image)
  if (args.entryPointArgs) {
    for (const entryPointArg of args.entryPointArgs) {
      dockerArgs.push(entryPointArg)
    }
  }
  // `docker create` prints the new container id on stdout
  const id = (await runDockerCommand(dockerArgs)).trim()
  if (!id) {
    throw new Error('Could not read id from docker command')
  }
  const response: ContainerMetadata = { id, image: args.image }
  if (network) {
    response.network = network
  }
  response.ports = []
  if ((args as ServiceContainerInfo).contextName) {
    response['contextName'] = (args as ServiceContainerInfo).contextName
  }
  return response
}
/**
 * Pulls an image, retrying up to 3 times. When a config directory is given it
 * is forwarded via --config so registry credentials are picked up.
 * @throws Error after the final failed attempt.
 */
export async function containerPull(
  image: string,
  configLocation: string
): Promise<void> {
  const pullArgs: string[] = ['pull']
  if (configLocation) {
    pullArgs.push('--config', configLocation)
  }
  pullArgs.push(image)
  const maxAttempts = 3
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      await runDockerCommand(pullArgs)
      return
    } catch {
      core.info(`docker pull failed on attempt: ${attempt}`)
    }
  }
  throw new Error('Exiting docker pull after 3 failed attempts')
}
// Starts a previously created container by id.
export async function containerStart(id: string): Promise<void> {
  await runDockerCommand(['start', `${id}`])
}
// Stops one container, or several at once when given an array of ids.
export async function containerStop(id: string | string[]): Promise<void> {
  const ids = Array.isArray(id) ? id : [id]
  await runDockerCommand(['stop', ...ids])
}
// Force-removes one or more containers (running containers are killed first).
export async function containerRemove(id: string | string[]): Promise<void> {
  const ids = Array.isArray(id) ? id : [id]
  await runDockerCommand(['rm', '--force', ...ids])
}
/**
 * Builds an image for a container step from the step's Dockerfile.
 * The build context is the directory containing the Dockerfile, resolved
 * relative to GITHUB_WORKSPACE.
 */
export async function containerBuild(
  args: RunContainerStepArgs,
  tag: string
): Promise<void> {
  // assumes GITHUB_WORKSPACE is set (see package README pre-requisites)
  const context = path.dirname(`${env.GITHUB_WORKSPACE}/${args.dockerfile}`)
  const dockerArgs: string[] = ['build']
  dockerArgs.push('-t', tag)
  dockerArgs.push('-f', `${env.GITHUB_WORKSPACE}/${args.dockerfile}`)
  dockerArgs.push(context)
  // TODO: figure out build working directory
  await runDockerCommand(dockerArgs, {
    workingDir: args['buildWorkingDirectory']
  })
}
// Streams a container's logs (with extra details) to the hook's output.
export async function containerLogs(id: string): Promise<void> {
  await runDockerCommand(['logs', '--details', id])
}
// Removes a docker network by name.
export async function containerNetworkRemove(network: string): Promise<void> {
  await runDockerCommand(['network', 'rm', network])
}
// Removes every container carrying this runner's label (leftovers from
// previous jobs on the same machine).
export async function containerPrune(): Promise<void> {
  const listArgs: string[] = [
    'ps',
    '--all',
    '--quiet',
    '--no-trunc',
    '--filter',
    `label=${getRunnerLabel()}`
  ]
  const output = (await runDockerCommand(listArgs)).trim()
  if (output) {
    // one container id per line
    await containerRemove(output.split('\n'))
  }
}
// Reads the health status reported by `docker inspect`; containers whose
// image has no HEALTHCHECK configured yield ContainerHealth.None.
async function containerHealthStatus(id: string): Promise<ContainerHealth> {
  const inspectArgs = [
    'inspect',
    '--format="{{if .Config.Healthcheck}}{{print .State.Health.Status}}{{end}}"',
    id
  ]
  const status = (await runDockerCommand(inspectArgs)).trim().replace(/"/g, '')
  switch (status) {
    case ContainerHealth.Healthy:
    case ContainerHealth.Starting:
    case ContainerHealth.Unhealthy:
      return status
    default:
      return ContainerHealth.None
  }
}
/**
 * Waits for a container to report a healthy status, polling with exponential
 * backoff (2^tries seconds, up to 12 retries). Containers without a
 * configured healthcheck are treated as healthy immediately.
 * @throws Error when the container never reaches the healthy state.
 */
export async function healthCheck({
  id,
  image
}: ContainerMetadata): Promise<void> {
  let health = await containerHealthStatus(id)
  if (health === ContainerHealth.None) {
    core.info(
      `Healthcheck is not set for container ${image}, considered as ${ContainerHealth.Healthy}`
    )
    return
  }
  let tries = 1
  while (health === ContainerHealth.Starting && tries < 13) {
    const backOffSeconds = Math.pow(2, tries)
    core.info(
      `Container '${image}' is '${health}', retry in ${backOffSeconds} seconds`
    )
    await new Promise(resolve => setTimeout(resolve, 1000 * backOffSeconds))
    tries++
    health = await containerHealthStatus(id)
  }
  if (health !== ContainerHealth.Healthy) {
    // Bug fix: the original threw `new String(...)`, a boxed String object
    // that is not an instance of Error, breaking `instanceof Error` handling
    // and stack traces upstream. Throw a real Error instead.
    throw new Error(`Container '${image}' is unhealthy with status '${health}'`)
  }
}
// Returns the raw `docker port` mapping lines for a container,
// e.g. '80/tcp -> 0.0.0.0:8080'.
export async function containerPorts(id: string): Promise<string[]> {
  const output = await runDockerCommand(['port', id])
  return output.trim().split('\n')
}
/**
 * Logs into the registry described by args.registry (if any) using a
 * throw-away docker --config directory so credentials never touch the
 * default config.
 * @returns the config directory path, or '' when no registry was given.
 * @throws rethrows any login failure after cleaning up the config directory.
 */
export async function registryLogin(args): Promise<string> {
  if (!args.registry) {
    return ''
  }
  const credentials = {
    username: args.registry.username,
    password: args.registry.password
  }
  const configLocation = `${env.RUNNER_TEMP}/.docker_${uuidv4()}`
  fs.mkdirSync(configLocation)
  try {
    await dockerLogin(configLocation, args.registry.serverUrl, credentials)
  } catch (error) {
    // fs.rmSync replaces the deprecated fs.rmdirSync(..., { recursive: true })
    fs.rmSync(configLocation, { recursive: true, force: true })
    throw error
  }
  return configLocation
}
/**
 * Logs out of the registry associated with the given throw-away config
 * directory and deletes the directory. No-op when configLocation is empty.
 */
export async function registryLogout(configLocation: string): Promise<void> {
  if (configLocation) {
    await dockerLogout(configLocation)
    // fs.rmSync replaces the deprecated fs.rmdirSync(..., { recursive: true })
    fs.rmSync(configLocation, { recursive: true, force: true })
  }
}
// Runs `docker login` against the given registry, writing credentials into a
// dedicated --config directory. The password is piped via stdin
// (--password-stdin) so it never appears on the command line.
async function dockerLogin(
  configLocation: string,
  registry: string,
  credentials: { username: string; password: string }
): Promise<void> {
  const hasCredentials = Boolean(credentials.username && credentials.password)
  const loginArgs = ['--config', configLocation, 'login']
  if (hasCredentials) {
    loginArgs.push('-u', credentials.username, '--password-stdin')
  }
  loginArgs.push(registry)
  const options: RunDockerCommandOptions = hasCredentials
    ? { input: Buffer.from(credentials.password, 'utf-8') }
    : {}
  await runDockerCommand(loginArgs, options)
}
// Logs out of whatever registry the given --config directory is logged into.
async function dockerLogout(configLocation: string): Promise<void> {
  await runDockerCommand(['--config', configLocation, 'logout'])
}
/**
 * Runs a script step by exec-ing the step's entrypoint inside the job
 * container, with the step's working directory and environment variables.
 */
export async function containerExecStep(
  args,
  containerId: string
): Promise<void> {
  const dockerArgs: string[] = ['exec', '-i']
  dockerArgs.push(`--workdir=${args.workingDirectory}`)
  for (const [key, value] of Object.entries(args['environmentVariables'])) {
    dockerArgs.push('-e')
    // NOTE(review): the double quotes are passed literally to docker (exec
    // does not go through a shell) — verify this is intended
    if (!value) {
      dockerArgs.push(`"${key}"`)
    } else {
      dockerArgs.push(`"${key}=${value}"`)
    }
  }
  // Todo figure out prepend path and update it here
  // (we need to pass path in as -e Path={fullpath}) where {fullpath is the prepend path added to the current containers path}
  dockerArgs.push(containerId)
  dockerArgs.push(args.entryPoint)
  for (const entryPointArg of args.entryPointArgs) {
    dockerArgs.push(entryPointArg)
  }
  await runDockerCommand(dockerArgs)
}
/**
 * Runs a container step to completion with `docker run --rm`, attached to the
 * job network and labeled with the runner label so it can be pruned later.
 * @throws Error when args.image is not set.
 */
export async function containerRun(
  args: RunContainerStepArgs,
  name: string,
  network: string
): Promise<void> {
  if (!args.image) {
    throw new Error('expected image to be set')
  }
  const dockerArgs: string[] = ['run', '--rm']
  dockerArgs.push('--name', name)
  dockerArgs.push(`--workdir=${args.workingDirectory}`)
  dockerArgs.push(`--label=${getRunnerLabel()}`)
  dockerArgs.push(`--network=${network}`)
  if (args.createOptions) {
    // NOTE(review): naive split on spaces — quoted option values containing
    // spaces would be broken apart; confirm inputs never need them
    dockerArgs.push(...args.createOptions.split(' '))
  }
  if (args.environmentVariables) {
    for (const [key, value] of Object.entries(args.environmentVariables)) {
      // Pass in this way to avoid printing secrets
      // (docker forwards `-e KEY` by reading KEY from this process's env)
      env[key] = value ?? undefined
      dockerArgs.push('-e')
      dockerArgs.push(key)
    }
  }
  // User mounts plus the runner's required system mounts
  const mountVolumes = [
    ...(args.userMountVolumes || []),
    ...(args.systemMountVolumes || [])
  ]
  for (const mountVolume of mountVolumes) {
    dockerArgs.push(`-v`)
    dockerArgs.push(
      `${mountVolume.sourceVolumePath}:${mountVolume.targetVolumePath}${
        mountVolume.readOnly ? ':ro' : ''
      }`
    )
  }
  if (args['entryPoint']) {
    dockerArgs.push(`--entrypoint`)
    dockerArgs.push(args['entryPoint'])
  }
  dockerArgs.push(args.image)
  if (args.entryPointArgs) {
    for (const entryPointArg of args.entryPointArgs) {
      dockerArgs.push(entryPointArg)
    }
  }
  await runDockerCommand(dockerArgs)
}
// Detects whether the container's OS is Alpine by grepping /etc/*release*
// inside the container; a non-zero exit code means "not alpine".
export async function isContainerAlpine(containerId: string): Promise<boolean> {
  const checkArgs: string[] = [
    'exec',
    containerId,
    'sh',
    '-c',
    "[ $(cat /etc/*release* | grep -i -e '^ID=*alpine*' -c) != 0 ] || exit 1"
  ]
  try {
    await runDockerCommand(checkArgs)
  } catch {
    return false
  }
  return true
}
// Health states reported by `docker inspect` (.State.Health.Status); None
// means the image has no HEALTHCHECK configured.
enum ContainerHealth {
  Starting = 'starting',
  Healthy = 'healthy',
  Unhealthy = 'unhealthy',
  None = 'none'
}
// Metadata describing a created container, recorded into the hook response.
export interface ContainerMetadata {
  id: string
  image: string
  network?: string
  // raw `docker port` output lines, e.g. '80/tcp -> 0.0.0.0:8080'
  ports?: string[]
  // network alias for service containers
  contextName?: string
}

View File

@@ -0,0 +1,2 @@
export * from './container'
export * from './network'

View File

@@ -0,0 +1,26 @@
import { runDockerCommand } from '../utils'
import { getRunnerLabel } from './constants'
// Creates a docker network carrying this runner's label so it can be pruned
// after the job.
export async function networkCreate(networkName): Promise<void> {
  await runDockerCommand([
    'network',
    'create',
    '--label',
    getRunnerLabel(),
    networkName
  ])
}
// Removes a docker network by name.
export async function networkRemove(networkName): Promise<void> {
  await runDockerCommand(['network', 'rm', networkName])
}
// Removes every unused network carrying this runner's label.
export async function networkPrune(): Promise<void> {
  await runDockerCommand([
    'network',
    'prune',
    '--force',
    `--filter`,
    `label=${getRunnerLabel()}`
  ])
}

View File

@@ -0,0 +1,21 @@
import {
containerRemove,
containerNetworkRemove
} from '../dockerCommands/container'
/**
 * Removes the job container, all service containers, and the job network
 * recorded in the prepare-job state. Tolerates a missing/partial state.
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
export async function cleanupJob(args, state, responseFile): Promise<void> {
  const containerIds: string[] = []
  if (state?.container) {
    containerIds.push(state.container)
  }
  if (state?.services?.length) {
    // Bug fix: state.services is an ARRAY of container ids (see
    // generateResponseFile); the original pushed the whole array as a single
    // element, producing a comma-joined pseudo-id for `docker rm`.
    containerIds.push(...state.services)
  }
  if (containerIds.length > 0) {
    await containerRemove(containerIds)
  }
  // Optional chaining for consistency with the guards above (state may be null)
  if (state?.network) {
    await containerNetworkRemove(state.network)
  }
}

View File

@@ -0,0 +1,4 @@
export * from './cleanup-job'
export * from './prepare-job'
export * from './run-script-step'
export * from './run-container-step'

View File

@@ -0,0 +1,205 @@
import * as core from '@actions/core'
import { ContextPorts, PrepareJobArgs, writeToResponseFile } from 'hooklib/lib'
import { exit } from 'process'
import { v4 as uuidv4 } from 'uuid'
import {
ContainerMetadata,
containerPorts,
containerPrune,
containerPull,
containerStart,
createContainer,
healthCheck,
isContainerAlpine,
registryLogin,
registryLogout
} from '../dockerCommands/container'
import { networkCreate, networkPrune } from '../dockerCommands/network'
import { sanitize } from '../utils'
/**
 * Prepares the job: prunes leftovers from previous runs, creates a fresh
 * network, pulls and starts the job container and any service containers,
 * waits for their health checks, and writes the resulting state/context to
 * the response file.
 */
export async function prepareJob(
  args: PrepareJobArgs,
  responseFile
): Promise<void> {
  // Clean up anything left over from a previous job on this runner
  await containerPrune()
  await networkPrune()
  const container = args.container
  const services = args.services
  if (!container?.image && !services?.length) {
    core.info('No containers exist, skipping hook invocation')
    exit(0)
  }
  const networkName = generateNetworkName()
  // Create network
  await networkCreate(networkName)
  // Create Job Container
  let containerMetadata: ContainerMetadata | undefined = undefined
  if (!container?.image) {
    core.info('No job container provided, skipping')
  } else {
    setupContainer(container)
    const configLocation = await registryLogin(container.registry)
    try {
      await containerPull(container.image, configLocation)
    } finally {
      await registryLogout(configLocation)
    }
    containerMetadata = await createContainer(
      container,
      generateContainerName(container.image),
      networkName
    )
    if (!containerMetadata?.id) {
      throw new Error('Failed to create container')
    }
    await containerStart(containerMetadata.id)
  }
  // Create Service Containers
  const servicesMetadata: ContainerMetadata[] = []
  if (!services?.length) {
    core.info('No service containers provided, skipping')
  } else {
    for (const service of services) {
      const configLocation = await registryLogin(service.registry)
      try {
        await containerPull(service.image, configLocation)
      } finally {
        await registryLogout(configLocation)
      }
      setupContainer(service)
      const response = await createContainer(
        service,
        generateContainerName(service.image),
        networkName
      )
      servicesMetadata.push(response)
      await containerStart(response.id)
    }
  }
  if (
    (container && !containerMetadata?.id) ||
    (services?.length && servicesMetadata.some(s => !s.id))
  ) {
    throw new Error(
      `Not all containers are started correctly ${
        containerMetadata?.id
      }, ${servicesMetadata.map(e => e.id).join(',')}`
    )
  }
  // Bug fix: the original unconditionally dereferenced containerMetadata!,
  // crashing for service-only jobs that have no job container.
  const isAlpine = containerMetadata?.id
    ? await isContainerAlpine(containerMetadata.id)
    : false
  if (containerMetadata?.id) {
    containerMetadata.ports = await containerPorts(containerMetadata.id)
  }
  if (servicesMetadata?.length) {
    for (const serviceMetadata of servicesMetadata) {
      serviceMetadata.ports = await containerPorts(serviceMetadata.id)
    }
  }
  // Health-check the job container (when present) and all services in parallel
  const healthChecks: Promise<void>[] = []
  if (containerMetadata) {
    healthChecks.push(healthCheck(containerMetadata))
  }
  for (const service of servicesMetadata) {
    healthChecks.push(healthCheck(service))
  }
  try {
    await Promise.all(healthChecks)
    core.info('All services are healthy')
  } catch (error) {
    core.error(`Failed to initialize containers, ${error}`)
    throw new Error(`Failed to initialize containers, ${error}`)
  }
  generateResponseFile(
    responseFile,
    networkName,
    containerMetadata,
    servicesMetadata,
    isAlpine
  )
}
/**
 * Writes the prepare-job response: state (network + container/service ids for
 * cleanup) and context (container metadata with docker port mappings
 * transformed to containerPort -> hostPort).
 */
function generateResponseFile(
  responseFile: string,
  networkName: string,
  containerMetadata?: ContainerMetadata,
  servicesMetadata?: ContainerMetadata[],
  isAlpine = false
): void {
  const response = {
    state: { network: networkName },
    context: {},
    isAlpine
  }
  if (containerMetadata) {
    response.state['container'] = containerMetadata.id
    // Deep-copy so transforming ports for the context does not mutate the
    // original metadata
    const contextMeta = JSON.parse(JSON.stringify(containerMetadata))
    if (containerMetadata.ports) {
      contextMeta.ports = transformDockerPortsToContextPorts(containerMetadata)
    }
    // (the original re-assigned the transformed ports a second time here —
    // the redundant duplicate block has been removed)
    response.context['container'] = contextMeta
  }
  if (servicesMetadata && servicesMetadata.length > 0) {
    response.state['services'] = []
    response.context['services'] = []
    for (const meta of servicesMetadata) {
      response.state['services'].push(meta.id)
      const contextMeta = JSON.parse(JSON.stringify(meta))
      if (contextMeta.ports) {
        contextMeta.ports = transformDockerPortsToContextPorts(contextMeta)
      }
      response.context['services'].push(contextMeta)
    }
  }
  writeToResponseFile(responseFile, JSON.stringify(response))
}
// Overrides the container's entrypoint with `tail -f /dev/null` so the
// container stays alive until steps are exec'd into it.
function setupContainer(container): void {
  container.entryPoint = 'tail'
  container.entryPointArgs = [`-f`, `/dev/null`]
}
// Produces a unique, per-job docker network name.
function generateNetworkName(): string {
  const unique = uuidv4()
  return `github_network_${unique}`
}
// Builds a unique container name: random alias + sanitized image name +
// short random suffix, joined by underscores.
function generateContainerName(container): string {
  const alias = uuidv4().replace(/-/g, '')
  const suffix = uuidv4().substring(0, 6)
  return [alias, sanitize(container.image), suffix].join('_')
}
/**
 * Converts `docker port` output lines (e.g. '80/tcp -> 0.0.0.0:8080') into a
 * map of container port -> host port.
 * @throws Error when a line does not match the expected format.
 */
function transformDockerPortsToContextPorts(
  meta: ContainerMetadata
): ContextPorts {
  // ex: '80/tcp -> 0.0.0.0:80'
  const portFormat = /^(\d+)\/(\w+)? -> (.*):(\d+)$/
  const contextPorts: ContextPorts = {}
  for (const portLine of meta.ports ?? []) {
    const matches = portLine.match(portFormat)
    if (matches === null) {
      throw new Error(
        'Container ports could not match the regex: "^(\\d+)\\/(\\w+)? -> (.*):(\\d+)$"'
      )
    }
    // group 1 is the container port; the final group is the host port
    contextPorts[matches[1]] = matches[matches.length - 1]
  }
  return contextPorts
}

View File

@@ -0,0 +1,39 @@
import {
containerBuild,
registryLogin,
registryLogout,
containerPull,
containerRun
} from '../dockerCommands'
import { v4 as uuidv4 } from 'uuid'
import * as core from '@actions/core'
import { RunContainerStepArgs } from 'hooklib/lib'
import { getRunnerLabel } from '../dockerCommands/constants'
/**
 * Runs a container step: builds the image from a Dockerfile when one is
 * provided, otherwise pulls it (logging into the registry when credentials
 * are supplied), then runs the container on the job network.
 */
export async function runContainerStep(
  args: RunContainerStepArgs,
  state
): Promise<void> {
  const tag = generateBuildTag() // for docker build
  if (!args.image) {
    // NOTE(review): logs an error but falls through — containerRun will then
    // throw 'expected image to be set'; confirm this is intended
    core.error('expected an image')
  } else {
    if (args.dockerfile) {
      await containerBuild(args, tag)
      args.image = tag
    } else {
      const configLocation = await registryLogin(args)
      try {
        await containerPull(args.image, configLocation)
      } finally {
        await registryLogout(configLocation)
      }
    }
  }
  // container will get pruned at the end of the job based on the label, no need to cleanup here
  // the container name is the random suffix of the build tag
  await containerRun(args, tag.split(':')[1], state.network)
}
// Tags built images as <runner-label>:<short-uuid> so they are associated
// with this runner and pruned with the job.
function generateBuildTag(): string {
  const suffix = uuidv4().substring(0, 6)
  return `${getRunnerLabel()}:${suffix}`
}

View File

@@ -0,0 +1,9 @@
import { RunScriptStepArgs } from 'hooklib/lib'
import { containerExecStep } from '../dockerCommands'
/**
 * Executes a script step by exec-ing into the job container recorded in the
 * prepare-job state.
 */
export async function runScriptStep(
  args: RunScriptStepArgs,
  state
): Promise<void> {
  const jobContainerId = state.container
  await containerExecStep(args, jobContainerId)
}

View File

@@ -0,0 +1,48 @@
import * as core from '@actions/core'
import {
Command,
getInputFromStdin,
PrepareJobArgs,
RunContainerStepArgs,
RunScriptStepArgs
} from 'hooklib/lib'
import { exit } from 'process'
import {
cleanupJob,
prepareJob,
runContainerStep,
runScriptStep
} from './hooks'
/**
 * Hook entrypoint: reads one invocation (command, args, state, responseFile)
 * from stdin, dispatches to the matching hook, and exits 0 on success or 1 on
 * any failure.
 */
async function run(): Promise<void> {
  const input = await getInputFromStdin()
  const args = input['args']
  const command = input['command']
  const responseFile = input['responseFile']
  const state = input['state']
  try {
    switch (command) {
      case Command.PrepareJob:
        await prepareJob(args as PrepareJobArgs, responseFile)
        return exit(0)
      case Command.CleanupJob:
        await cleanupJob(null, state, null)
        return exit(0)
      case Command.RunScriptStep:
        await runScriptStep(args as RunScriptStepArgs, state)
        return exit(0)
      case Command.RunContainerStep:
        await runContainerStep(args as RunContainerStepArgs, state)
        return exit(0)
      default:
        throw new Error(`Command not recognized: ${command}`)
    }
  } catch (error) {
    core.error(`${error}`)
    exit(1)
  }
}
// fire-and-forget top-level invocation; exit codes are set inside run()
void run()

View File

@@ -0,0 +1,56 @@
/* eslint-disable @typescript-eslint/no-var-requires */
/* eslint-disable @typescript-eslint/no-require-imports */
/* eslint-disable import/no-commonjs */
import * as core from '@actions/core'
// Import this way otherwise typescript has errors
const exec = require('@actions/exec')
// Options forwarded to @actions/exec: an optional working directory and an
// optional stdin buffer (used to pipe registry passwords to docker login).
export interface RunDockerCommandOptions {
  workingDir?: string
  input?: Buffer
}
/**
 * Runs `docker <args>` and returns its stdout.
 * @throws Error containing docker's stderr when the command exits non-zero.
 */
export async function runDockerCommand(
  args: string[],
  options?: RunDockerCommandOptions
): Promise<string> {
  const pipes = await exec.getExecOutput('docker', args, options)
  if (pipes.exitCode !== 0) {
    core.error(`Docker failed with exit code ${pipes.exitCode}`)
    // Bug fix: reject with a real Error (the original rejected with the bare
    // stderr string, which is not an Error instance and carries no stack)
    throw new Error(pipes.stderr)
  }
  return pipes.stdout
}
// Strips a string down to characters valid in a docker container name
// ([a-zA-Z0-9_]), additionally requiring the first kept character to be a
// letter. Non-string or empty input yields ''.
export function sanitize(val: string): string {
  if (!val || typeof val !== 'string') {
    return ''
  }
  let result = ''
  for (const char of val) {
    if (result.length === 0) {
      // the first kept character must be alphabetic
      if (isAlpha(char)) {
        result += char
      }
    } else if (isAlpha(char) || isNumeric(char) || char === '_') {
      result += char
    }
  }
  return result
}
// isAlpha accepts single character and checks if
// that character is [a-zA-Z]
function isAlpha(val: string): boolean {
  if (val.length !== 1) {
    return false
  }
  return (val >= 'a' && val <= 'z') || (val >= 'A' && val <= 'Z')
}
// true when val is a single ASCII digit
function isNumeric(val: string): boolean {
  if (val.length !== 1) {
    return false
  }
  return val >= '0' && val <= '9'
}

View File

@@ -0,0 +1,62 @@
import { prepareJob, cleanupJob } from '../src/hooks'
import { v4 as uuidv4 } from 'uuid'
import * as fs from 'fs'
import * as path from 'path'
import TestSetup from './test-setup'
// Integration test: runs a real prepareJob (pulls images, starts containers
// and a network), then verifies cleanupJob tears everything down cleanly.
const prepareJobInputPath = path.resolve(
  `${__dirname}/../../../examples/prepare-job.json`
)
const tmpOutputDir = `${__dirname}/${uuidv4()}`
let prepareJobOutputPath: string
let prepareJobData: any
let testSetup: TestSetup
jest.useRealTimers()
describe('cleanup job', () => {
  beforeAll(() => {
    fs.mkdirSync(tmpOutputDir, { recursive: true })
  })
  afterAll(() => {
    fs.rmSync(tmpOutputDir, { recursive: true })
  })
  beforeEach(async () => {
    // Load the example prepare-job payload and point its mounts/workdir at
    // this test's sandbox directories
    const prepareJobRawData = fs.readFileSync(prepareJobInputPath, 'utf8')
    prepareJobData = JSON.parse(prepareJobRawData.toString())
    prepareJobOutputPath = `${tmpOutputDir}/prepare-job-output-${uuidv4()}.json`
    fs.writeFileSync(prepareJobOutputPath, '')
    testSetup = new TestSetup()
    testSetup.initialize()
    prepareJobData.args.container.userMountVolumes = testSetup.userMountVolumes
    prepareJobData.args.container.systemMountVolumes =
      testSetup.systemMountVolumes
    prepareJobData.args.container.workingDirectory = testSetup.workingDirectory
    await prepareJob(prepareJobData.args, prepareJobOutputPath)
  })
  afterEach(() => {
    fs.rmSync(prepareJobOutputPath, { force: true })
    testSetup.teardown()
  })
  it('should cleanup successfully', async () => {
    // cleanupJob consumes the state written by prepareJob
    const prepareJobOutputContent = fs.readFileSync(
      prepareJobOutputPath,
      'utf-8'
    )
    const parsedPrepareJobOutput = JSON.parse(prepareJobOutputContent)
    await expect(
      cleanupJob(prepareJobData.args, parsedPrepareJobOutput.state, null)
    ).resolves.not.toThrow()
  })
})

View File

@@ -0,0 +1,14 @@
import { containerPull } from '../src/dockerCommands'
// These tests hit the real docker daemon; real timers are needed for the
// retry backoff inside containerPull.
jest.useRealTimers()
describe('container pull', () => {
  it('should fail', async () => {
    // a non-existent image exhausts all pull retries and throws
    const arg = { image: 'doesNotExist' }
    await expect(containerPull(arg.image, '')).rejects.toThrow()
  })
  it('should succeed', async () => {
    const arg = { image: 'ubuntu:latest' }
    await expect(containerPull(arg.image, '')).resolves.not.toThrow()
  })
})

View File

@@ -0,0 +1,117 @@
import {
prepareJob,
cleanupJob,
runScriptStep,
runContainerStep
} from '../src/hooks'
import * as fs from 'fs'
import * as path from 'path'
import { v4 as uuidv4 } from 'uuid'
import TestSetup from './test-setup'
// End-to-end tests exercising the full hook lifecycle against a real docker
// daemon: prepare job -> run script step -> run container step -> cleanup.
const prepareJobJson = fs.readFileSync(
  path.resolve(__dirname + '/../../../examples/prepare-job.json'),
  'utf8'
)
const containerStepJson = fs.readFileSync(
  path.resolve(__dirname + '/../../../examples/run-container-step.json'),
  'utf8'
)
const tmpOutputDir = `${__dirname}/_temp/${uuidv4()}`
let prepareJobData: any
let scriptStepJson: any
let scriptStepData: any
let containerStepData: any
let prepareJobOutputFilePath: string
let testSetup: TestSetup
describe('e2e', () => {
  beforeAll(() => {
    fs.mkdirSync(tmpOutputDir, { recursive: true })
  })
  afterAll(() => {
    fs.rmSync(tmpOutputDir, { recursive: true })
  })
  beforeEach(() => {
    // init dirs
    testSetup = new TestSetup()
    testSetup.initialize()
    // Point every example payload's mounts/workdir at this test's sandbox
    prepareJobData = JSON.parse(prepareJobJson)
    prepareJobData.args.container.userMountVolumes = testSetup.userMountVolumes
    prepareJobData.args.container.systemMountVolumes =
      testSetup.systemMountVolumes
    prepareJobData.args.container.workingDirectory = testSetup.workingDirectory
    scriptStepJson = fs.readFileSync(
      path.resolve(__dirname + '/../../../examples/run-script-step.json'),
      'utf8'
    )
    scriptStepData = JSON.parse(scriptStepJson)
    scriptStepData.args.workingDirectory = testSetup.workingDirectory
    containerStepData = JSON.parse(containerStepJson)
    containerStepData.args.workingDirectory = testSetup.workingDirectory
    containerStepData.args.userMountVolumes = testSetup.userMountVolumes
    containerStepData.args.systemMountVolumes = testSetup.systemMountVolumes
    prepareJobOutputFilePath = `${tmpOutputDir}/prepare-job-output-${uuidv4()}.json`
    fs.writeFileSync(prepareJobOutputFilePath, '')
  })
  afterEach(() => {
    fs.rmSync(prepareJobOutputFilePath, { force: true })
    testSetup.teardown()
  })
  it('should prepare job, then run script step, then run container step then cleanup', async () => {
    await expect(
      prepareJob(prepareJobData.args, prepareJobOutputFilePath)
    ).resolves.not.toThrow()
    // Later hooks consume the state prepareJob wrote to the response file
    let rawState = fs.readFileSync(prepareJobOutputFilePath, 'utf-8')
    let resp = JSON.parse(rawState)
    await expect(
      runScriptStep(scriptStepData.args, resp.state)
    ).resolves.not.toThrow()
    await expect(
      runContainerStep(containerStepData.args, resp.state)
    ).resolves.not.toThrow()
    await expect(cleanupJob(resp, resp.state, null)).resolves.not.toThrow()
  })
  it('should prepare job, then run script step, then run container step with Dockerfile then cleanup', async () => {
    await expect(
      prepareJob(prepareJobData.args, prepareJobOutputFilePath)
    ).resolves.not.toThrow()
    let rawState = fs.readFileSync(prepareJobOutputFilePath, 'utf-8')
    let resp = JSON.parse(rawState)
    await expect(
      runScriptStep(scriptStepData.args, resp.state)
    ).resolves.not.toThrow()
    // Write a minimal Dockerfile so the container step exercises the
    // build-from-Dockerfile path instead of pulling an image
    const dockerfilePath = `${tmpOutputDir}/Dockerfile`
    fs.writeFileSync(
      dockerfilePath,
      `FROM ubuntu:latest
ENV TEST=test
ENTRYPOINT [ "tail", "-f", "/dev/null" ]
`
    )
    const containerStepDataCopy = JSON.parse(JSON.stringify(containerStepData))
    process.env.GITHUB_WORKSPACE = tmpOutputDir
    containerStepDataCopy.args.dockerfile = 'Dockerfile'
    containerStepDataCopy.args.context = '.'
    console.log(containerStepDataCopy.args)
    await expect(
      runContainerStep(containerStepDataCopy.args, resp.state)
    ).resolves.not.toThrow()
    await expect(cleanupJob(resp, resp.state, null)).resolves.not.toThrow()
  })
})

View File

@@ -0,0 +1,103 @@
import * as fs from 'fs'
import { v4 as uuidv4 } from 'uuid'
import { prepareJob } from '../src/hooks'
import TestSetup from './test-setup'
// Integration tests for prepareJob: each test runs against a real docker
// daemon using the example prepare-job payload.
jest.useRealTimers()
let prepareJobOutputPath: string
let prepareJobData: any
const tmpOutputDir = `${__dirname}/_temp/${uuidv4()}`
const prepareJobInputPath = `${__dirname}/../../../examples/prepare-job.json`
let testSetup: TestSetup
describe('prepare job', () => {
  beforeAll(() => {
    fs.mkdirSync(tmpOutputDir, { recursive: true })
  })
  afterAll(() => {
    fs.rmSync(tmpOutputDir, { recursive: true })
  })
  beforeEach(async () => {
    testSetup = new TestSetup()
    testSetup.initialize()
    // Load the example payload and retarget mounts/workdir at the sandbox
    let prepareJobRawData = fs.readFileSync(prepareJobInputPath, 'utf8')
    prepareJobData = JSON.parse(prepareJobRawData.toString())
    prepareJobData.args.container.userMountVolumes = testSetup.userMountVolumes
    prepareJobData.args.container.systemMountVolumes =
      testSetup.systemMountVolumes
    prepareJobData.args.container.workingDirectory = testSetup.workingDirectory
    prepareJobOutputPath = `${tmpOutputDir}/prepare-job-output-${uuidv4()}.json`
    fs.writeFileSync(prepareJobOutputPath, '')
  })
  afterEach(() => {
    testSetup.teardown()
  })
  it('should not throw', async () => {
    await expect(
      prepareJob(prepareJobData.args, prepareJobOutputPath)
    ).resolves.not.toThrow()
    expect(() => fs.readFileSync(prepareJobOutputPath, 'utf-8')).not.toThrow()
  })
  it('should have JSON output written to a file', async () => {
    await prepareJob(prepareJobData.args, prepareJobOutputPath)
    const prepareJobOutputContent = fs.readFileSync(
      prepareJobOutputPath,
      'utf-8'
    )
    expect(() => JSON.parse(prepareJobOutputContent)).not.toThrow()
  })
  it('should have context written to a file', async () => {
    await prepareJob(prepareJobData.args, prepareJobOutputPath)
    const prepareJobOutputContent = fs.readFileSync(
      prepareJobOutputPath,
      'utf-8'
    )
    const parsedPrepareJobOutput = JSON.parse(prepareJobOutputContent)
    expect(parsedPrepareJobOutput.context).toBeDefined()
  })
  it('should have container ids written to file', async () => {
    await prepareJob(prepareJobData.args, prepareJobOutputPath)
    const prepareJobOutputContent = fs.readFileSync(
      prepareJobOutputPath,
      'utf-8'
    )
    const parsedPrepareJobOutput = JSON.parse(prepareJobOutputContent)
    // docker container ids are lowercase hex strings
    expect(parsedPrepareJobOutput.context.container.id).toBeDefined()
    expect(typeof parsedPrepareJobOutput.context.container.id).toBe('string')
    expect(parsedPrepareJobOutput.context.container.id).toMatch(/^[0-9a-f]+$/)
  })
  it('should have ports for context written in form [containerPort]:[hostPort]', async () => {
    await prepareJob(prepareJobData.args, prepareJobOutputPath)
    const prepareJobOutputContent = fs.readFileSync(
      prepareJobOutputPath,
      'utf-8'
    )
    const parsedPrepareJobOutput = JSON.parse(prepareJobOutputContent)
    // expected mappings come from the example payload's portMappings
    const mainContainerPorts = parsedPrepareJobOutput.context.container.ports
    expect(mainContainerPorts['8080']).toBe('80')
    const redisService = parsedPrepareJobOutput.context.services.find(
      s => s.image === 'redis'
    )
    const redisServicePorts = redisService.ports
    expect(redisServicePorts['80']).toBe('8080')
    expect(redisServicePorts['8080']).toBe('8088')
  })
})

View File

@@ -0,0 +1,112 @@
import * as fs from 'fs'
import { v4 as uuidv4 } from 'uuid'
import { env } from 'process'
import { Mount } from 'hooklib'
/**
 * Creates and tears down a per-test sandbox that mimics the Actions runner's
 * on-disk layout (_work, externals, temp dirs, ...) and exposes the mount
 * configuration the hooks expect.
 */
export default class TestSetup {
  // root of this test's sandbox (unique per instance)
  private testdir: string
  // mock of the runner's installation layout
  private runnerMockDir: string
  // subdirectories mirroring the real runner's _work layout
  private runnerMockSubdirs = {
    work: '_work',
    externals: 'externals',
    workTemp: '_work/_temp',
    workActions: '_work/_actions',
    workTool: '_work/_tool',
    githubHome: '_work/_temp/_github_home',
    githubWorkflow: '_work/_temp/_github_workflow'
  }
  private readonly projectName = 'example'
  constructor() {
    this.testdir = `${__dirname}/_temp/${uuidv4()}`
    this.runnerMockDir = `${this.testdir}/runner/_layout`
  }
  // every directory that must exist before a test runs
  private get allTestDirectories() {
    const resp = [this.testdir, this.runnerMockDir]
    for (const [key, value] of Object.entries(this.runnerMockSubdirs)) {
      resp.push(`${this.runnerMockDir}/${value}`)
    }
    resp.push(
      `${this.runnerMockDir}/_work/${this.projectName}/${this.projectName}`
    )
    return resp
  }
  // Creates the sandbox directories and the env vars the hooks read
  public initialize(): void {
    for (const dir of this.allTestDirectories) {
      fs.mkdirSync(dir, { recursive: true })
    }
    env['RUNNER_NAME'] = 'test'
    env[
      'RUNNER_TEMP'
    ] = `${this.runnerMockDir}/${this.runnerMockSubdirs.workTemp}`
  }
  // Deletes the whole sandbox
  public teardown(): void {
    fs.rmdirSync(this.testdir, { recursive: true })
  }
  // A sample user-specified volume mount
  public get userMountVolumes(): Mount[] {
    return [
      {
        sourceVolumePath: 'my_docker_volume',
        targetVolumePath: '/volume_mount',
        readOnly: false
      }
    ]
  }
  // The system mounts the runner always supplies (docker socket, _work,
  // externals, temp, actions, tool cache, github home/workflow dirs)
  public get systemMountVolumes(): Mount[] {
    return [
      {
        sourceVolumePath: '/var/run/docker.sock',
        targetVolumePath: '/var/run/docker.sock',
        readOnly: false
      },
      {
        sourceVolumePath: `${this.runnerMockDir}/${this.runnerMockSubdirs.work}`,
        targetVolumePath: '/__w',
        readOnly: false
      },
      {
        sourceVolumePath: `${this.runnerMockDir}/${this.runnerMockSubdirs.externals}`,
        targetVolumePath: '/__e',
        readOnly: true
      },
      {
        sourceVolumePath: `${this.runnerMockDir}/${this.runnerMockSubdirs.workTemp}`,
        targetVolumePath: '/__w/_temp',
        readOnly: false
      },
      {
        sourceVolumePath: `${this.runnerMockDir}/${this.runnerMockSubdirs.workActions}`,
        targetVolumePath: '/__w/_actions',
        readOnly: false
      },
      {
        sourceVolumePath: `${this.runnerMockDir}/${this.runnerMockSubdirs.workTool}`,
        targetVolumePath: '/__w/_tool',
        readOnly: false
      },
      {
        sourceVolumePath: `${this.runnerMockDir}/${this.runnerMockSubdirs.githubHome}`,
        targetVolumePath: '/github/home',
        readOnly: false
      },
      {
        sourceVolumePath: `${this.runnerMockDir}/${this.runnerMockSubdirs.githubWorkflow}`,
        targetVolumePath: '/github/workflow',
        readOnly: false
      }
    ]
  }
  // in-container working directory for the example project
  public get workingDirectory(): string {
    return `/__w/${this.projectName}/${this.projectName}`
  }
}

View File

@@ -0,0 +1,12 @@
import { sanitize } from '../src/utils'
// Tests for sanitize() (src/utils). These cases show that ':' is removed
// and that letters, digits and underscores pass through unchanged.
describe('Utilities', () => {
it('should return sanitized image name', () => {
expect(sanitize('ubuntu:latest')).toBe('ubuntulatest')
})
it('should return the same string', () => {
const validStr = 'teststr8_one'
expect(sanitize(validStr)).toBe(validStr)
})
})

View File

@@ -0,0 +1,11 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"baseUrl": "./",
"outDir": "./lib",
"rootDir": "./src"
},
"include": [
"./src"
]
}

4350
packages/hooklib/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,28 @@
{
"name": "hooklib",
"version": "0.1.0",
"description": "",
"main": "lib/index.js",
"types": "index.d.ts",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1",
"build": "tsc",
"format": "prettier --write '**/*.ts'",
"format-check": "prettier --check '**/*.ts'",
"lint": "eslint src/**/*.ts"
},
"author": "",
"license": "MIT",
"devDependencies": {
"@types/node": "^17.0.23",
"@typescript-eslint/parser": "^5.18.0",
"@zeit/ncc": "^0.22.3",
"eslint": "^8.12.0",
"eslint-plugin-github": "^4.3.6",
"prettier": "^2.6.2",
"typescript": "^4.6.3"
},
"dependencies": {
"@actions/core": "^1.6.0"
}
}

View File

@@ -0,0 +1,2 @@
export * from './interfaces'
export * from './utils'

View File

@@ -0,0 +1,99 @@
/** Hook commands the runner can invoke; values match the runner's wire format. */
export enum Command {
PrepareJob = 'prepare_job',
CleanupJob = 'cleanup_job',
RunContainerStep = 'run_container_step',
RunScriptStep = 'run_script_step'
}
/** Envelope read from stdin for every hook invocation. */
export interface HookData {
command: Command
// Path of the file the hook writes its JSON response into.
responseFile: string
// Command-specific payload; shape depends on `command`.
args?: PrepareJobArgs | RunContainerStepArgs | RunScriptStepArgs
// Opaque state carried between hook invocations.
state?: { [key: string]: any }
}
/** Payload for 'prepare_job': the job container and its service containers. */
export interface PrepareJobArgs {
container?: JobContainerInfo
services?: ServiceContainerInfo[]
}
/** Payload for 'run_container_step' (a container action). */
export type RunContainerStepArgs = StepContainerInfo
/** Payload for 'run_script_step': a script executed inside the job container. */
export interface RunScriptStepArgs {
entryPoint: string
entryPointArgs: string[]
environmentVariables?: { [key: string]: string }
// Paths to prepend to PATH before running the step.
prependPath?: string[]
workingDirectory: string
}
/** Common container definition shared by job, service and step containers. */
export interface ContainerInfo {
image?: string
entryPoint?: string
entryPointArgs?: string[]
createOptions?: string
environmentVariables?: { [key: string]: string }
// Additional mounts requested by the workflow author.
userMountVolumes?: Mount[]
registry?: Registry
// Port mappings such as "8080:80/tcp".
// NOTE(review): confirm host/container ordering against the runner's
// docker implementation.
portMappings?: string[]
}
/** A service container; contextName is the key exposed to the workflow. */
export interface ServiceContainerInfo extends ContainerInfo {
contextName: string
image: string
}
/** The main job container; image and system mounts are mandatory. */
export interface JobContainerInfo extends ContainerInfo {
image: string
workingDirectory: string
// Mounts the runner itself requires (work, externals, ...).
systemMountVolumes: Mount[]
}
/** Container for a single container-action step. */
export interface StepContainerInfo extends ContainerInfo {
prependPath?: string[]
workingDirectory: string
// Set when the action must be built from a Dockerfile rather than pulled.
dockerfile?: string
systemMountVolumes: Mount[]
}
/** A source -> target volume mapping. */
export interface Mount {
sourceVolumePath: string
targetVolumePath: string
readOnly: boolean
}
/** Credentials for a (possibly private) container registry. */
export interface Registry {
username?: string
password?: string
serverUrl: string
}
/** Network protocols supported in port mappings. */
export enum Protocol {
TCP = 'tcp',
UDP = 'udp'
}
/**
 * Kubernetes pod lifecycle phases, mirroring the literal `status.phase`
 * strings reported by the Kubernetes API.
 */
export enum PodPhase {
  PENDING = 'Pending',
  RUNNING = 'Running',
  // Fixed typo: the Kubernetes API reports 'Succeeded' (was 'Succeded'),
  // which made genuinely succeeded pods compare as UNKNOWN.
  SUCCEEDED = 'Succeeded',
  FAILED = 'Failed',
  UNKNOWN = 'Unknown'
}
/** Response written by 'prepare_job' into the response file. */
export interface PrepareJobResponse {
state?: object
context?: ContainerContext
services?: { [key: string]: ContainerContext }
// NOTE(review): the k8s prepare-job hook writes this field as 'isAlpine' --
// confirm which name the runner actually consumes.
alpine: boolean
}
/** Runtime details of a started container, echoed back to the runner. */
export interface ContainerContext {
id?: string
network?: string
// Port map exposed to the workflow context (see ContextPorts).
ports?: { [key: string]: string }
}
/** Port lookup written to the response: ports[containerPort] = hostPort. */
export interface ContextPorts {
[source: string]: string // source -> target
}

View File

@@ -0,0 +1,44 @@
import * as core from '@actions/core'
import * as events from 'events'
import * as fs from 'fs'
import * as os from 'os'
import * as readline from 'readline'
import { HookData } from './interfaces'
/**
 * Reads the hook payload from stdin and parses it as HookData.
 * Each received line overwrites the previous one, so only the LAST line
 * before stdin closes is parsed.
 * NOTE(review): assumes the runner sends the JSON payload as a single line --
 * a multi-line payload would be silently truncated to its last line.
 * @throws SyntaxError when the final line is not valid JSON.
 */
export async function getInputFromStdin(): Promise<HookData> {
let input = ''
const rl = readline.createInterface({
input: process.stdin
})
rl.on('line', line => {
core.debug(`Line from STDIN: ${line}`)
input = line
})
// Resolves when stdin is closed by the runner.
await events.default.once(rl, 'close')
const inputJson = JSON.parse(input)
return inputJson as HookData
}
/**
 * Appends one line to the hook response file the runner is watching.
 * Objects are JSON-stringified; strings are written verbatim;
 * null/undefined produce an empty line.
 * @param filePath path of an EXISTING response file (the runner creates it)
 * @param message value to append
 * @throws when filePath is empty or the file does not exist
 */
export function writeToResponseFile(filePath: string, message: any): void {
  if (!filePath) throw new Error(`Expected file path`)
  if (!fs.existsSync(filePath)) {
    throw new Error(`Missing file at path: ${filePath}`)
  }
  const line = `${toCommandValue(message)}${os.EOL}`
  fs.appendFileSync(filePath, line, { encoding: 'utf8' })
}

/** Converts a value to the string form used in the response file. */
function toCommandValue(input: any): string {
  if (input == null) {
    // Covers both null and undefined.
    return ''
  }
  return typeof input === 'string' || input instanceof String
    ? (input as string)
    : JSON.stringify(input)
}

View File

@@ -0,0 +1,11 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"baseUrl": "./",
"outDir": "./lib",
"rootDir": "./src"
},
"include": [
"./src"
]
}

12
packages/k8s/README.md Normal file
View File

@@ -0,0 +1,12 @@
# K8s Hooks
## Description
This implementation provides a way to dynamically spin up jobs to run container workflows, rather then relying on the default docker implementation. It is meant to be used when the runner itself is running in k8s, for example when using the [Actions Runner Controller](https://github.com/actions-runner-controller/actions-runner-controller)
## Pre-requisites
Some things are expected to be set when using these hooks
- The runner itself should be running in a pod, with a service account with the following permissions
- The `ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER=true` should be set to true
- The `ACTIONS_RUNNER_POD_NAME` env should be set to the name of the pod
- The runner pod should map a persistent volume claim into the `_work` directory
- The `ACTIONS_RUNNER_CLAIM_NAME` should be set to the persistent volume claim that contains the runner's working directory

View File

@@ -0,0 +1,13 @@
// Jest configuration for the k8s hooks package: runs *-test.ts files under
// node via ts-jest; mocks are reset between tests and the shared timeout is
// set in jest.setup.js.
// eslint-disable-next-line import/no-commonjs
module.exports = {
clearMocks: true,
moduleFileExtensions: ['js', 'ts'],
testEnvironment: 'node',
testMatch: ['**/*-test.ts'],
testRunner: 'jest-circus/runner',
transform: {
'^.+\\.ts$': 'ts-jest'
},
setupFilesAfterEnv: ['./jest.setup.js'],
verbose: true
}

View File

@@ -0,0 +1 @@
jest.setTimeout(90000)

9076
packages/k8s/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

30
packages/k8s/package.json Normal file
View File

@@ -0,0 +1,30 @@
{
"name": "kubehooks",
"version": "0.1.0",
"description": "",
"main": "lib/index.js",
"scripts": {
"test": "jest --runInBand",
"build": "tsc && npx ncc build",
"format": "prettier --write '**/*.ts'",
"format-check": "prettier --check '**/*.ts'",
"lint": "eslint src/**/*.ts"
},
"author": "",
"license": "MIT",
"dependencies": {
"@actions/core": "^1.6.0",
"@actions/exec": "^1.1.1",
"@actions/io": "^1.1.2",
"@kubernetes/client-node": "^0.16.3",
"hooklib": "file:../hooklib"
},
"devDependencies": {
"@types/jest": "^27.4.1",
"@types/node": "^17.0.23",
"@vercel/ncc": "^0.33.4",
"jest": "^27.5.1",
"ts-jest": "^27.1.4",
"typescript": "^4.6.3"
}
}

View File

@@ -0,0 +1,5 @@
import { podPrune } from '../k8s'
/**
 * Hook entry point for 'cleanup_job': deletes any pods previously created
 * for this runner instance (via podPrune) so nothing leaks between jobs.
 */
export async function cleanupJob(): Promise<void> {
await podPrune()
}

View File

@@ -0,0 +1,58 @@
import { v4 as uuidv4 } from 'uuid'
// Kubernetes limits object names to 63 characters (DNS label length).
const MAX_POD_NAME_LENGTH = 63
// Length of the random suffix appended to step pod names for uniqueness.
const STEP_POD_NAME_SUFFIX_LENGTH = 8
// Name of the job container inside the workflow pod.
export const JOB_CONTAINER_NAME = 'job'

/**
 * Returns the name of the pod the runner itself is executing in.
 * @throws when the ACTIONS_RUNNER_POD_NAME env variable is not set.
 */
export function getRunnerPodName(): string {
  const name = process.env.ACTIONS_RUNNER_POD_NAME
  if (!name) {
    throw new Error(
      "'ACTIONS_RUNNER_POD_NAME' env is required, please contact your self hosted runner administrator"
    )
  }
  return name
}

/**
 * Derives the workflow pod name from the runner pod name, truncating so the
 * result stays within the 63-character Kubernetes name limit.
 */
export function getJobPodName(): string {
  return `${getRunnerPodName().substring(
    0,
    MAX_POD_NAME_LENGTH - '-workflow'.length
  )}-workflow`
}

/**
 * Derives a unique pod name for a container step.
 * Fixed: reserve room for the full '-step-' marker (6 chars) plus the random
 * suffix; the previous '-step'.length reservation was one character short,
 * allowing names of 64 characters for long runner names.
 */
export function getStepPodName(): string {
  return `${getRunnerPodName().substring(
    0,
    MAX_POD_NAME_LENGTH - ('-step-'.length + STEP_POD_NAME_SUFFIX_LENGTH)
  )}-step-${uuidv4().substring(0, STEP_POD_NAME_SUFFIX_LENGTH)}`
}

/**
 * Returns the persistent volume claim backing the runner's _work directory.
 * @throws when the ACTIONS_RUNNER_CLAIM_NAME env variable is not set.
 */
export function getVolumeClaimName(): string {
  const name = process.env.ACTIONS_RUNNER_CLAIM_NAME
  if (!name) {
    throw new Error(
      "'ACTIONS_RUNNER_CLAIM_NAME' is required, please contact your self hosted runner administrator"
    )
  }
  return name
}

/**
 * Label (key/value) applied to every pod created on behalf of this runner
 * instance, so cleanup (podPrune) can find and delete them.
 */
export class RunnerInstanceLabel {
  runnerhook: string
  constructor() {
    // Use getRunnerPodName() for a consistent, early failure when the env
    // variable is missing (previously an unchecked cast could produce an
    // 'undefined' label value).
    this.runnerhook = getRunnerPodName()
  }
  get key(): string {
    return 'runner-pod'
  }
  get value(): string {
    return this.runnerhook
  }
  toString(): string {
    return `runner-pod=${this.runnerhook}`
  }
}

View File

@@ -0,0 +1,4 @@
export * from './cleanup-job'
export * from './prepare-job'
export * from './run-script-step'
export * from './run-container-step'

View File

@@ -0,0 +1,197 @@
import * as core from '@actions/core'
import * as io from '@actions/io'
import * as k8s from '@kubernetes/client-node'
import {
ContextPorts,
PodPhase,
prepareJobArgs,
writeToResponseFile
} from 'hooklib'
import path from 'path'
import {
containerPorts,
createPod,
isAuthPermissionsOK,
isPodContainerAlpine,
namespace,
podPrune,
requiredPermissions,
waitForPodPhases
} from '../k8s'
import {
containerVolumes,
DEFAULT_CONTAINER_ENTRY_POINT,
DEFAULT_CONTAINER_ENTRY_POINT_ARGS
} from '../k8s/utils'
import { JOB_CONTAINER_NAME } from './constants'
/**
 * Hook entry point for 'prepare_job': prunes leftover pods, verifies RBAC
 * permissions, copies the runner externals, creates the workflow pod (job
 * container + services), waits for it to come online, and writes the
 * response file consumed by the runner.
 * @param args job/service container definitions from the runner
 * @param responseFile path to write the JSON response to
 */
export async function prepareJob(
  args: prepareJobArgs,
  responseFile
): Promise<void> {
  // Remove any pods left over from a previous job on this runner.
  await podPrune()
  if (!(await isAuthPermissionsOK())) {
    throw new Error(
      // Fixed: interpolate the namespace VALUE; `${namespace}` stringified
      // the imported function itself.
      `The Service account needs the following permissions ${JSON.stringify(
        requiredPermissions
      )} on the pod resource in the '${namespace()}' namespace. Please contact your self hosted runner administrator.`
    )
  }
  await copyExternalsToRoot()
  let container: k8s.V1Container | undefined = undefined
  if (args.container?.image) {
    core.info(`Using image '${args.container.image}' for job image`)
    container = createPodSpec(args.container, JOB_CONTAINER_NAME, true)
  }
  let services: k8s.V1Container[] = []
  if (args.services?.length) {
    services = args.services.map(service => {
      core.info(`Adding service '${service.image}' to pod definition`)
      // Service container name defaults to the image name without its tag.
      return createPodSpec(service, service.image.split(':')[0])
    })
  }
  if (!container && !services?.length) {
    throw new Error('No containers exist, skipping hook invocation')
  }
  let createdPod: k8s.V1Pod | undefined = undefined
  try {
    // NOTE(review): args.registry is not declared on the prepare-job args
    // interface -- confirm whether this should be args.container?.registry.
    createdPod = await createPod(container, services, args.registry)
  } catch (err) {
    await podPrune()
    throw new Error(`failed to create job pod: ${err}`)
  }
  if (!createdPod?.metadata?.name) {
    throw new Error('created pod should have metadata.name')
  }
  try {
    // Wait until the pod is Running; keep backing off while it is Pending.
    await waitForPodPhases(
      createdPod.metadata.name,
      new Set([PodPhase.RUNNING]),
      new Set([PodPhase.PENDING])
    )
  } catch (err) {
    await podPrune()
    throw new Error(`Pod failed to come online with error: ${err}`)
  }
  core.info('Pod is ready for traffic')
  let isAlpine = false
  try {
    // Detect Alpine -- presumably so the runner picks the matching
    // externals build; confirm with the runner implementation.
    isAlpine = await isPodContainerAlpine(
      createdPod.metadata.name,
      JOB_CONTAINER_NAME
    )
  } catch (err) {
    throw new Error(`Failed to determine if the pod is alpine: ${err}`)
  }
  generateResponseFile(responseFile, createdPod, isAlpine)
}
/**
 * Writes the prepare-job response (state + container/service contexts) to
 * the response file. Ports are exposed as ports[containerPort] = hostPort.
 * NOTE(review): the response uses the key 'isAlpine' while the
 * PrepareJobResponse interface declares 'alpine' -- confirm which one the
 * runner expects.
 */
function generateResponseFile(
  responseFile: string,
  appPod: k8s.V1Pod,
  isAlpine
): void {
  const response = {
    state: {},
    context: {},
    isAlpine
  }
  const mainContainer = appPod.spec?.containers?.find(
    c => c.name === JOB_CONTAINER_NAME
  )
  if (mainContainer) {
    const mainContainerContextPorts: ContextPorts = {}
    if (mainContainer?.ports) {
      for (const port of mainContainer.ports) {
        // Fixed: read the host port from the port object; the old code read
        // 'hostPort' off the (initially empty) context map, so every entry
        // was undefined.
        mainContainerContextPorts[port.containerPort] = port.hostPort
      }
    }
    response.context['container'] = {
      image: mainContainer.image,
      ports: mainContainerContextPorts
    }
  }
  const serviceContainers = appPod.spec?.containers.filter(
    c => c.name !== JOB_CONTAINER_NAME
  )
  if (serviceContainers?.length) {
    response.context['services'] = serviceContainers.map(c => {
      if (!c.ports) {
        return
      }
      const ctxPorts: ContextPorts = {}
      for (const port of c.ports) {
        ctxPorts[port.containerPort] = port.hostPort
      }
      return {
        image: c.image,
        ports: ctxPorts
      }
    })
  }
  writeToResponseFile(responseFile, JSON.stringify(response))
}
/**
 * Copies the runner's externals directory next to the workspace so it can be
 * mounted into the job pod (as /__e). No-op when RUNNER_WORKSPACE is unset.
 */
async function copyExternalsToRoot(): Promise<void> {
  const workspace = process.env['RUNNER_WORKSPACE']
  if (!workspace) {
    return
  }
  const source = path.join(workspace, '../../externals')
  const destination = path.join(workspace, '../externals')
  await io.cp(source, destination, {
    force: true,
    recursive: true,
    copySourceDirectory: false
  })
}
/**
 * Builds a k8s container spec from a hook container definition.
 * @param container hook-provided definition (image, entry point, env, ...)
 * @param name container name inside the pod
 * @param jobContainer whether this is the main job container (extra mounts)
 */
function createPodSpec(
  container,
  name: string,
  jobContainer = false
): k8s.V1Container {
  core.info(JSON.stringify(container))
  // Only fall back to defaults when the hook did not supply values.
  // Fixed: entryPointArgs was unconditionally overwritten right after this
  // guard, discarding any args supplied by the runner.
  if (!container.entryPointArgs) {
    container.entryPointArgs = DEFAULT_CONTAINER_ENTRY_POINT_ARGS
  }
  if (!container.entryPoint) {
    container.entryPoint = DEFAULT_CONTAINER_ENTRY_POINT
  }
  const podContainer = {
    name,
    image: container.image,
    command: [container.entryPoint],
    args: container.entryPointArgs,
    ports: containerPorts(container)
  } as k8s.V1Container
  if (container.workingDirectory) {
    podContainer.workingDir = container.workingDirectory
  }
  podContainer.env = []
  // environmentVariables is optional -- guard before iterating.
  for (const [key, value] of Object.entries(
    container['environmentVariables'] || {}
  )) {
    // HOME is skipped -- presumably managed by the runner's mounts; confirm.
    if (value && key !== 'HOME') {
      podContainer.env.push({ name: key, value: value as string })
    }
  }
  podContainer.volumeMounts = containerVolumes(
    container.userMountVolumes,
    jobContainer
  )
  return podContainer
}

View File

@@ -0,0 +1,69 @@
import * as k8s from '@kubernetes/client-node'
import * as core from '@actions/core'
import { PodPhase } from 'hooklib'
import {
createJob,
getContainerJobPodName,
getPodLogs,
getPodStatus,
waitForJobToComplete,
waitForPodPhases
} from '../k8s'
import { JOB_CONTAINER_NAME } from './constants'
import { containerVolumes } from '../k8s/utils'
/**
 * Hook entry point for 'run_container_step': runs a container action as a
 * Kubernetes Job, streams its logs, and returns the container's exit code.
 * @returns the step container's exit code (0 when it cannot be determined)
 * @throws when the step requires building from a Dockerfile (unsupported)
 */
export async function runContainerStep(stepContainer): Promise<number> {
  if (stepContainer.dockerfile) {
    throw new Error('Building container actions is not currently supported')
  }
  const container = createPodSpec(stepContainer)
  const job = await createJob(container)
  if (!job.metadata?.name) {
    throw new Error(
      `Expected job ${JSON.stringify(
        job
      )} to have correctly set the metadata.name`
    )
  }
  const podName = await getContainerJobPodName(job.metadata.name)
  // Fixed: PodPhase has no COMPLETED member; a finished pod reports
  // 'Succeeded', so wait for SUCCEEDED or RUNNING.
  await waitForPodPhases(
    podName,
    new Set([PodPhase.SUCCEEDED, PodPhase.RUNNING]),
    new Set([PodPhase.PENDING])
  )
  await getPodLogs(podName, JOB_CONTAINER_NAME)
  await waitForJobToComplete(job.metadata.name)
  // pod has failed so pull the status code from the container
  const status = await getPodStatus(podName)
  if (!status?.containerStatuses?.length) {
    core.warning(`Can't determine container status`)
    return 0
  }
  const exitCode =
    status.containerStatuses[status.containerStatuses.length - 1].state
      ?.terminated?.exitCode
  return Number(exitCode) || 0
}
/**
 * Builds the k8s container spec for a container-action step.
 * Only sets `command` when the step overrides the image entry point.
 */
function createPodSpec(container): k8s.V1Container {
  const podContainer = new k8s.V1Container()
  podContainer.name = JOB_CONTAINER_NAME
  podContainer.image = container.image
  if (container.entryPoint) {
    // Guard the spread: a custom entry point may come without arguments.
    podContainer.command = [
      container.entryPoint,
      ...(container.entryPointArgs || [])
    ]
  }
  podContainer.env = []
  // environmentVariables is optional -- guard before iterating.
  for (const [key, value] of Object.entries(
    container['environmentVariables'] || {}
  )) {
    // HOME is skipped -- presumably managed by the runner's mounts; confirm.
    if (value && key !== 'HOME') {
      podContainer.env.push({ name: key, value: value as string })
    }
  }
  podContainer.volumeMounts = containerVolumes()
  return podContainer
}

View File

@@ -0,0 +1,38 @@
/* eslint-disable @typescript-eslint/no-unused-vars */
import { RunScriptStepArgs } from 'hooklib'
import { execPodStep } from '../k8s'
import { JOB_CONTAINER_NAME } from './constants'
/**
 * Hook entry point for 'run_script_step': executes the step's entry point
 * inside the job container via `env KEY=VALUE... entryPoint args...`.
 * @param args entry point, arguments and env vars for the step
 * @param state expected to carry `jobPod`, the pod created by prepare_job
 *              (NOTE(review): the k8s prepareJob currently writes an empty
 *              state object -- confirm where jobPod is populated)
 * @param responseFile unused -- this hook writes no response
 */
export async function runScriptStep(
args: RunScriptStepArgs,
state,
responseFile
): Promise<void> {
const cb = new CommandsBuilder(
args.entryPoint,
args.entryPointArgs,
args.environmentVariables
)
await execPodStep(cb.command, state.jobPod, JOB_CONTAINER_NAME)
}
/**
 * Builds the argv used to run a script step inside the job container:
 * ['env', KEY=VALUE..., entryPoint, entryPointArgs...].
 */
class CommandsBuilder {
  constructor(
    private entryPoint: string,
    private entryPointArgs: string[],
    private environmentVariables: { [key: string]: string }
  ) {}

  get command(): string[] {
    // Map each env var to a KEY=VALUE assignment for the `env` wrapper;
    // a missing/empty map simply yields no assignments.
    const envAssignments: string[] = Object.entries(
      this.environmentVariables ?? {}
    ).map(([name, value]) => `${name}=${value}`)
    return ['env', ...envAssignments, this.entryPoint, ...this.entryPointArgs]
  }
}

44
packages/k8s/src/index.ts Normal file
View File

@@ -0,0 +1,44 @@
import { Command, getInputFromStdin, prepareJobArgs } from 'hooklib'
import {
cleanupJob,
prepareJob,
runContainerStep,
runScriptStep
} from './hooks'
/**
 * Entry point: reads a single hook invocation from stdin, dispatches to the
 * matching hook implementation, and reports failure via the exit code.
 */
async function run(): Promise<void> {
  const input = await getInputFromStdin()
  const args = input['args']
  const command = input['command']
  const responseFile = input['responseFile']
  const state = input['state']
  let exitCode = 0
  try {
    switch (command) {
      case Command.PrepareJob:
        await prepareJob(args as prepareJobArgs, responseFile)
        break
      case Command.CleanupJob:
        await cleanupJob()
        break
      case Command.RunScriptStep:
        // run_script_step writes no response file.
        await runScriptStep(args, state, null)
        break
      case Command.RunContainerStep:
        exitCode = await runContainerStep(args)
        break
      // Fixed: removed the duplicate `case Command.runContainerStep:` --
      // that member does not exist on the enum and RunContainerStep is
      // already handled above.
      default:
        throw new Error(`Command not recognized: ${command}`)
    }
  } catch (error) {
    // eslint-disable-next-line no-console
    console.log(error)
    exitCode = 1
  }
  process.exitCode = exitCode
}

void run()

View File

@@ -0,0 +1,524 @@
import * as k8s from '@kubernetes/client-node'
import { ContainerInfo, PodPhase, Registry } from 'hooklib'
import * as stream from 'stream'
import { v4 as uuidv4 } from 'uuid'
import {
getJobPodName,
getRunnerPodName,
getVolumeClaimName,
RunnerInstanceLabel
} from '../hooks/constants'
const kc = new k8s.KubeConfig()
kc.loadFromDefault()
const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
const k8sBatchV1Api = kc.makeApiClient(k8s.BatchV1Api)
const k8sAuthorizationV1Api = kc.makeApiClient(k8s.AuthorizationV1Api)
// Name of the pod volume backing the runner's shared work directory.
export const POD_VOLUME_NAME = 'work'
// RBAC permissions the hooks require; verified up-front via
// SelfSubjectAccessReview before creating any resources.
export const requiredPermissions = [
{
group: '',
verbs: ['get', 'list', 'create', 'delete'],
resource: 'pods',
subresource: ''
},
{
group: '',
verbs: ['get', 'create'],
resource: 'pods',
subresource: 'exec'
},
{
group: '',
verbs: ['get', 'list', 'watch'],
resource: 'pods',
subresource: 'log'
},
{
group: 'batch',
verbs: ['get', 'list', 'create', 'delete'],
resource: 'jobs',
subresource: ''
}
]
// Additional permission needed only when pulling from a private registry
// (to create imagePullSecrets).
const secretPermission = {
group: '',
verbs: ['get', 'list', 'create', 'delete'],
resource: 'secrets',
subresource: ''
}
export async function createPod(
jobContainer?: k8s.V1Container,
services?: k8s.V1Container[],
registry?: Registry
): Promise<k8s.V1Pod> {
const containers: k8s.V1Container[] = []
if (jobContainer) {
containers.push(jobContainer)
}
if (services?.length) {
containers.push(...services)
}
const appPod = new k8s.V1Pod()
appPod.apiVersion = 'v1'
appPod.kind = 'Pod'
appPod.metadata = new k8s.V1ObjectMeta()
appPod.metadata.name = getJobPodName()
const instanceLabel = new RunnerInstanceLabel()
appPod.metadata.labels = {
[instanceLabel.key]: instanceLabel.value
}
appPod.spec = new k8s.V1PodSpec()
appPod.spec.containers = containers
appPod.spec.restartPolicy = 'Never'
appPod.spec.nodeName = await getCurrentNodeName()
const claimName = getVolumeClaimName()
appPod.spec.volumes = [
{
name: 'work',
persistentVolumeClaim: { claimName }
}
]
if (registry) {
if (await isSecretsAuthOK()) {
const secret = await createDockerSecret(registry)
if (!secret?.metadata?.name) {
throw new Error(`created secret does not have secret.metadata.name`)
}
const secretReference = new k8s.V1LocalObjectReference()
secretReference.name = secret.metadata.name
appPod.spec.imagePullSecrets = [secretReference]
} else {
throw new Error(
`Pulls from private registry is not allowed. Please contact your self hosted runner administrator. Service account needs permissions for ${secretPermission.verbs} in resource ${secretPermission.resource}`
)
}
}
const { body } = await k8sApi.createNamespacedPod(namespace(), appPod)
return body
}
export async function createJob(
container: k8s.V1Container
): Promise<k8s.V1Job> {
const job = new k8s.V1Job()
job.apiVersion = 'batch/v1'
job.kind = 'Job'
job.metadata = new k8s.V1ObjectMeta()
job.metadata.name = getJobPodName()
job.metadata.labels = { 'runner-pod': getRunnerPodName() }
job.spec = new k8s.V1JobSpec()
job.spec.ttlSecondsAfterFinished = 300
job.spec.backoffLimit = 0
job.spec.template = new k8s.V1PodTemplateSpec()
job.spec.template.spec = new k8s.V1PodSpec()
job.spec.template.spec.containers = [container]
job.spec.template.spec.restartPolicy = 'Never'
job.spec.template.spec.nodeName = await getCurrentNodeName()
const claimName = `${runnerName()}-work`
job.spec.template.spec.volumes = [
{
name: 'work',
persistentVolumeClaim: { claimName }
}
]
const { body } = await k8sBatchV1Api.createNamespacedJob(namespace(), job)
return body
}
/**
 * Resolves the pod created for a Job by polling on the 'job-name' label,
 * backing off for up to ~60 seconds (BackOffManager throws on timeout).
 * @throws when a pod is found but has no metadata.name, or on timeout.
 */
export async function getContainerJobPodName(jobName: string): Promise<string> {
const selector = `job-name=${jobName}`
const backOffManager = new BackOffManager(60)
while (true) {
// List at most one pod matching the job's label selector.
const podList = await k8sApi.listNamespacedPod(
namespace(),
undefined,
undefined,
undefined,
undefined,
selector,
1
)
if (!podList.body.items?.length) {
// Pod not scheduled yet -- wait and retry.
await backOffManager.backOff()
continue
}
if (!podList.body.items[0].metadata?.name) {
throw new Error(
`Failed to determine the name of the pod for job ${jobName}`
)
}
return podList.body.items[0].metadata.name
}
}
/** Deletes the named pod in the current namespace. */
export async function deletePod(podName: string): Promise<void> {
await k8sApi.deleteNamespacedPod(podName, namespace())
}
/**
 * Executes a command in a running pod container, streaming its stdout/stderr
 * to this process. Resolves when the command reports Success; rejects with a
 * JSON-serialized status otherwise.
 */
export async function execPodStep(
  command: string[],
  podName: string,
  containerName: string,
  stdin?: stream.Readable
): Promise<void> {
  // TODO, we need to add the path from `prependPath` to the PATH variable. How can we do that? Maybe another exec before running this one?
  // Maybe something like, get the current path, if these entries aren't in it, add them, then set the current path to that?
  // TODO: how do we set working directory? There doesn't seem to be an easy way to do it. Should we cd then execute our bash script?
  const exec = new k8s.Exec(kc)
  // Avoid the async-promise-executor antipattern: wire the exec call's own
  // rejection into ours with .catch instead of try/await inside the executor.
  return new Promise<void>((resolve, reject) => {
    exec
      .exec(
        namespace(),
        podName,
        containerName,
        command,
        process.stdout,
        process.stderr,
        stdin ?? null,
        false /* tty */,
        resp => {
          // kube.exec returns an error if exit code is not 0, but we can't actually get the exit code
          if (resp.status === 'Success') {
            resolve()
          } else {
            reject(
              JSON.stringify({ message: resp?.message, details: resp?.details })
            )
          }
        }
      )
      .catch(reject)
  })
}
/**
 * Polls until the job reports success.
 * NOTE: constructed without a time budget, so this waits indefinitely while
 * the job is neither succeeded nor failed.
 * @throws when the job fails or the status check errors.
 */
export async function waitForJobToComplete(jobName: string): Promise<void> {
  const backOffManager = new BackOffManager()
  while (true) {
    try {
      if (await isJobSucceeded(jobName)) {
        return
      }
    } catch (error) {
      // Preserve the underlying cause instead of discarding it.
      throw new Error(`job ${jobName} has failed: ${error}`)
    }
    await backOffManager.backOff()
  }
}
/**
 * Creates an immutable dockerconfigjson secret holding the registry
 * credentials, for use as an imagePullSecret on the job pod.
 * Both the `auth` field and the `.dockerconfigjson` payload must be
 * base64-ENCODED per the docker config format.
 */
export async function createDockerSecret(
  registry: Registry
): Promise<k8s.V1Secret> {
  const authContent = {
    auths: {
      [registry.serverUrl]: {
        username: registry.username,
        password: registry.password,
        // Fixed: Buffer.from(s, 'base64') DECODES; encode by converting the
        // raw string to base64 instead.
        auth: Buffer.from(`${registry.username}:${registry.password}`).toString(
          'base64'
        )
      }
    }
  }
  const secretName = generateSecretName()
  const secret = new k8s.V1Secret()
  secret.immutable = true
  secret.apiVersion = 'v1'
  secret.metadata = new k8s.V1ObjectMeta()
  secret.metadata.name = secretName
  secret.kind = 'Secret'
  secret.data = {
    // Fixed: same encode-vs-decode inversion as above.
    '.dockerconfigjson': Buffer.from(JSON.stringify(authContent)).toString(
      'base64'
    )
  }
  const { body } = await k8sApi.createNamespacedSecret(namespace(), secret)
  return body
}
/**
 * Polls a pod's phase until it reaches one of `awaitingPhases`.
 * Keeps backing off while the phase is in `backOffPhases`; any other phase,
 * or exceeding maxTimeSeconds, results in an error.
 * NOTE(review): the catch collapses all failures (including the backoff
 * timeout and API errors) into the same "unhealthy" message, hiding the
 * original cause.
 */
export async function waitForPodPhases(
podName: string,
awaitingPhases: Set<PodPhase>,
backOffPhases: Set<PodPhase>,
maxTimeSeconds = 45 * 60 // 45 min
): Promise<void> {
const backOffManager = new BackOffManager(maxTimeSeconds)
let phase: PodPhase = PodPhase.UNKNOWN
try {
while (true) {
phase = await getPodPhase(podName)
if (awaitingPhases.has(phase)) {
return
}
if (!backOffPhases.has(phase)) {
throw new Error(
`Pod ${podName} is unhealthy with phase status ${phase}`
)
}
await backOffManager.backOff()
}
} catch (error) {
throw new Error(`Pod ${podName} is unhealthy with phase status ${phase}`)
}
}
/**
 * Reads the pod's current phase, mapping anything unrecognized to UNKNOWN.
 * NOTE(review): relies on the PodPhase enum values exactly matching the
 * Kubernetes phase strings (e.g. 'Succeeded'); any mismatch makes real
 * phases collapse to UNKNOWN here -- verify the enum values.
 */
async function getPodPhase(podName: string): Promise<PodPhase> {
const podPhaseLookup = new Set<string>([
PodPhase.PENDING,
PodPhase.RUNNING,
PodPhase.SUCCEEDED,
PodPhase.FAILED,
PodPhase.UNKNOWN
])
const { body } = await k8sApi.readNamespacedPod(podName, namespace())
const pod = body
if (!pod.status?.phase || !podPhaseLookup.has(pod.status.phase)) {
return PodPhase.UNKNOWN
}
return pod.status?.phase
}
/**
 * Returns true once the job reports at least one succeeded pod.
 * @throws when the job reports any failed pods (status.failed > 0).
 */
async function isJobSucceeded(jobName: string): Promise<boolean> {
const { body } = await k8sBatchV1Api.readNamespacedJob(jobName, namespace())
const job = body
if (job.status?.failed) {
throw new Error(`job ${jobName} has failed`)
}
return !!job.status?.succeeded
}
/**
 * Follows the container's logs (last 50 lines onward), piping them to this
 * process's stdout/stderr, and resolves when the log stream closes.
 */
export async function getPodLogs(
podName: string,
containerName: string
): Promise<void> {
const log = new k8s.Log(kc)
const logStream = new stream.PassThrough()
logStream.on('data', chunk => {
// use write rather than console.log to prevent double line feed
process.stdout.write(chunk)
})
logStream.on('error', err => {
process.stderr.write(JSON.stringify(err))
})
const r = await log.log(namespace(), podName, containerName, logStream, {
follow: true,
tailLines: 50,
pretty: false,
timestamps: false
})
// Resolve only once the underlying log request closes.
await new Promise(resolve => r.on('close', () => resolve(null)))
}
/**
 * Deletes every pod labeled as belonging to this runner instance
 * (runner-pod=<runner pod name>). Used before a job to clear leftovers and
 * during cleanup.
 */
export async function podPrune(): Promise<void> {
const podList = await k8sApi.listNamespacedPod(
namespace(),
undefined,
undefined,
undefined,
undefined,
new RunnerInstanceLabel().toString()
)
if (!podList.body.items.length) {
return
}
// Delete all matching pods in parallel; pods without a name are skipped.
await Promise.all(
podList.body.items.map(
pod => pod.metadata?.name && deletePod(pod.metadata.name)
)
)
}
/** Reads the pod's full status subobject (phase, container statuses, ...). */
export async function getPodStatus(
name: string
): Promise<k8s.V1PodStatus | undefined> {
const { body } = await k8sApi.readNamespacedPod(name, namespace())
return body.status
}
/**
 * Checks via SelfSubjectAccessReview that the service account has every
 * verb in requiredPermissions for the pod/job resources in the current
 * namespace. All reviews are issued in parallel.
 */
export async function isAuthPermissionsOK(): Promise<boolean> {
  const asyncs: Promise<{
    response: unknown
    body: k8s.V1SelfSubjectAccessReview
  }>[] = []
  for (const resource of requiredPermissions) {
    for (const verb of resource.verbs) {
      // Build a fresh review per request: the previous code mutated one
      // shared object while requests were in flight, risking all reviews
      // carrying the attributes of the last iteration.
      const sar = new k8s.V1SelfSubjectAccessReview()
      sar.spec = new k8s.V1SelfSubjectAccessReviewSpec()
      sar.spec.resourceAttributes = new k8s.V1ResourceAttributes()
      sar.spec.resourceAttributes.verb = verb
      sar.spec.resourceAttributes.namespace = namespace()
      sar.spec.resourceAttributes.group = resource.group
      sar.spec.resourceAttributes.resource = resource.resource
      sar.spec.resourceAttributes.subresource = resource.subresource
      asyncs.push(k8sAuthorizationV1Api.createSelfSubjectAccessReview(sar))
    }
  }
  const responses = await Promise.all(asyncs)
  return responses.every(resp => resp.body.status?.allowed)
}
/**
 * Checks via SelfSubjectAccessReview that the service account may manage
 * secrets (needed for private-registry imagePullSecrets).
 */
export async function isSecretsAuthOK(): Promise<boolean> {
  const asyncs: Promise<{
    response: unknown
    body: k8s.V1SelfSubjectAccessReview
  }>[] = []
  for (const verb of secretPermission.verbs) {
    // Fresh review object per request (see isAuthPermissionsOK).
    const sar = new k8s.V1SelfSubjectAccessReview()
    sar.spec = new k8s.V1SelfSubjectAccessReviewSpec()
    sar.spec.resourceAttributes = new k8s.V1ResourceAttributes()
    sar.spec.resourceAttributes.verb = verb
    sar.spec.resourceAttributes.namespace = namespace()
    sar.spec.resourceAttributes.group = secretPermission.group
    sar.spec.resourceAttributes.resource = secretPermission.resource
    sar.spec.resourceAttributes.subresource = secretPermission.subresource
    asyncs.push(k8sAuthorizationV1Api.createSelfSubjectAccessReview(sar))
  }
  const responses = await Promise.all(asyncs)
  return responses.every(resp => resp.body.status?.allowed)
}
/**
 * Heuristically detects an Alpine-based container by grepping the
 * /etc/*release* files for an alpine ID inside the container.
 * Any exec failure (grep miss, exec error) is treated as "not alpine".
 */
export async function isPodContainerAlpine(
podName: string,
containerName: string
): Promise<boolean> {
let isAlpine = true
try {
await execPodStep(
[
'sh',
'-c',
"[ $(cat /etc/*release* | grep -i -e '^ID=*alpine*' -c) != 0 ] || exit 1"
],
podName,
containerName
)
} catch (err) {
isAlpine = false
}
return isAlpine
}
/**
 * Returns the node the runner pod is scheduled on -- used to pin job/step
 * pods to the same node (presumably so the work volume can be shared;
 * confirm with the volume provisioner's access mode).
 * @throws when the runner pod has no nodeName in its spec.
 */
async function getCurrentNodeName(): Promise<string> {
const resp = await k8sApi.readNamespacedPod(getRunnerPodName(), namespace())
const nodeName = resp.body.spec?.nodeName
if (!nodeName) {
throw new Error('Failed to determine node name')
}
return nodeName
}
/**
 * Resolves the Kubernetes namespace to operate in: the
 * ACTIONS_RUNNER_KUBERNETES_NAMESPACE env variable wins, otherwise the first
 * kubeconfig context that declares a namespace.
 * @throws when neither source provides a namespace.
 */
export function namespace(): string {
  if (process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE']) {
    return process.env['ACTIONS_RUNNER_KUBERNETES_NAMESPACE']
  }
  const context = kc.getContexts().find(ctx => ctx.namespace)
  if (!context?.namespace) {
    // Fixed: the old message claimed we were "falling back to `default`",
    // but no fallback happens -- this is a hard failure.
    throw new Error(
      'Failed to determine namespace. Namespace should be set in the current context, or in the env variable "ACTIONS_RUNNER_KUBERNETES_NAMESPACE"'
    )
  }
  return context.namespace
}
/** Generates a unique name for a per-job docker registry secret. */
function generateSecretName(): string {
  return `github-secret-${uuidv4()}`
}

/**
 * Returns this runner's pod name from the environment.
 * @throws when ACTIONS_RUNNER_POD_NAME is unset.
 */
function runnerName(): string {
  const podName = process.env.ACTIONS_RUNNER_POD_NAME
  if (podName) {
    return podName
  }
  throw new Error(
    'Failed to determine runner name. "ACTIONS_RUNNER_POD_NAME" env variables should be set.'
  )
}
/**
 * Sleep helper with exponential backoff: starts at 1s, doubles each call,
 * capped at 20s. Optionally throws 'backoff timeout' once the accumulated
 * sleep time exceeds a budget.
 */
class BackOffManager {
private backOffSeconds = 1
// Total seconds slept so far.
totalTime = 0
constructor(private throwAfterSeconds?: number) {
if (!throwAfterSeconds || throwAfterSeconds < 0) {
// No (or invalid) budget: back off forever without throwing.
this.throwAfterSeconds = undefined
}
}
/**
 * Sleeps for the current interval, then grows it (doubling, capped at 20s).
 * @throws Error('backoff timeout') once the budget is exceeded.
 */
async backOff(): Promise<void> {
await new Promise(resolve =>
setTimeout(resolve, this.backOffSeconds * 1000)
)
this.totalTime += this.backOffSeconds
if (this.throwAfterSeconds && this.throwAfterSeconds < this.totalTime) {
throw new Error('backoff timeout')
}
if (this.backOffSeconds < 20) {
this.backOffSeconds *= 2
}
if (this.backOffSeconds > 20) {
// Doubling from 16 yields 32; clamp back to the 20s ceiling.
this.backOffSeconds = 20
}
}
}
/**
 * Parses port mappings of the form "80", "8080:8080" or "8080:8080/tcp"
 * into k8s container ports. The first number becomes hostPort, the optional
 * second containerPort; protocol defaults to TCP.
 * NOTE(review): with a single number only hostPort is set -- confirm whether
 * containerPort should default to the same value.
 * @throws when a definition does not match the expected format.
 */
export function containerPorts(
  container: ContainerInfo
): k8s.V1ContainerPort[] {
  // Anchored so partial matches (e.g. "abc123") are rejected instead of
  // silently accepted. Example: 8080:8080/tcp
  const portFormat = /^(\d{1,5})(:(\d{1,5}))?(\/(tcp|udp))?$/
  const ports: k8s.V1ContainerPort[] = []
  // portMappings is optional on ContainerInfo; treat absence as "no ports".
  for (const portDefinition of container.portMappings ?? []) {
    const submatches = portFormat.exec(portDefinition)
    if (!submatches) {
      throw new Error(
        `Port definition "${portDefinition}" is in incorrect format`
      )
    }
    const port = new k8s.V1ContainerPort()
    port.hostPort = Number(submatches[1])
    if (submatches[3]) {
      port.containerPort = Number(submatches[3])
    }
    // Kubernetes expects upper-case protocol names.
    port.protocol = submatches[5] ? submatches[5].toUpperCase() : 'TCP'
    ports.push(port)
  }
  return ports
}

View File

@@ -0,0 +1,65 @@
import * as k8s from '@kubernetes/client-node'
import { Mount } from 'hooklib'
import * as path from 'path'
import { POD_VOLUME_NAME } from './index'
export const DEFAULT_CONTAINER_ENTRY_POINT_ARGS = [`-f`, `/dev/null`]
export const DEFAULT_CONTAINER_ENTRY_POINT = 'tail'
/**
 * Builds the volume mounts for a container, all backed by the shared pod
 * work volume. Non-job containers get only the /__w mount; the job container
 * additionally receives externals, github home/workflow mounts and any
 * user-requested mounts.
 */
export function containerVolumes(
  userMountVolumes: Mount[] = [],
  jobContainer = true
): k8s.V1VolumeMount[] {
  const mounts: k8s.V1VolumeMount[] = [
    { name: POD_VOLUME_NAME, mountPath: '/__w' }
  ]
  if (!jobContainer) {
    return mounts
  }
  mounts.push(
    { name: POD_VOLUME_NAME, mountPath: '/__e', subPath: 'externals' },
    {
      name: POD_VOLUME_NAME,
      mountPath: '/github/home',
      subPath: '_temp/_github_home'
    },
    {
      name: POD_VOLUME_NAME,
      mountPath: '/github/workflow',
      subPath: '_temp/_github_workflow'
    }
  )
  for (const userVolume of userMountVolumes) {
    // Relative source paths are resolved against GITHUB_WORKSPACE.
    // NOTE(review): absolute paths end up as subPath values; Kubernetes
    // requires subPath to be relative -- confirm this is handled upstream.
    const resolvedSourcePath = path.isAbsolute(userVolume.sourceVolumePath)
      ? userVolume.sourceVolumePath
      : path.join(
          process.env.GITHUB_WORKSPACE as string,
          userVolume.sourceVolumePath
        )
    mounts.push({
      name: POD_VOLUME_NAME,
      mountPath: userVolume.targetVolumePath,
      subPath: resolvedSourcePath,
      readOnly: userVolume.readOnly
    })
  }
  return mounts
}

View File

@@ -0,0 +1,31 @@
import * as path from 'path'
import * as fs from 'fs'
import { prepareJob, cleanupJob } from '../src/hooks'
import { TestTempOutput } from './test-setup'
// Per-test temp-directory helper; re-created in beforeEach.
let testTempOutput: TestTempOutput
// Prepare-job fixture JSON checked into the repository's examples directory.
const prepareJobJsonPath = path.resolve(
  `${__dirname}/../../../examples/prepare-job.json`
)
// File prepareJob writes its output to; created fresh in beforeEach.
let prepareJobOutputFilePath: string
describe('Cleanup Job', () => {
beforeEach(async () => {
const prepareJobJson = fs.readFileSync(prepareJobJsonPath)
let prepareJobData = JSON.parse(prepareJobJson.toString())
testTempOutput = new TestTempOutput()
testTempOutput.initialize()
prepareJobOutputFilePath = testTempOutput.createFile(
'prepare-job-output.json'
)
await prepareJob(prepareJobData.args, prepareJobOutputFilePath)
})
it('should not throw', async () => {
const outputJson = fs.readFileSync(prepareJobOutputFilePath)
const outputData = JSON.parse(outputJson.toString())
await expect(cleanupJob()).resolves.not.toThrow()
})
})

View File

@@ -0,0 +1,66 @@
import * as fs from 'fs'
import * as path from 'path'
import {
cleanupJob,
prepareJob,
runContainerStep,
runScriptStep
} from '../src/hooks'
import { TestTempOutput } from './test-setup'
// Long-running integration flow; make sure jest fake timers are off.
jest.useRealTimers()

// Per-test temp-directory helper; re-created in beforeEach.
let testTempOutput: TestTempOutput

// Fixture JSON files checked into the repository's examples directory.
const prepareJobJsonPath = path.resolve(
  `${__dirname}/../../../../examples/prepare-job.json`
)
const runScriptStepJsonPath = path.resolve(
  `${__dirname}/../../../../examples/run-script-step.json`
)
// Was declared with `let` but is never reassigned; `const` matches siblings.
const runContainerStepJsonPath = path.resolve(
  `${__dirname}/../../../../examples/run-container-step.json`
)

// Parsed fixture and output-file path, populated in beforeEach.
let prepareJobData: any
let prepareJobOutputFilePath: string
describe('e2e', () => {
beforeEach(() => {
const prepareJobJson = fs.readFileSync(prepareJobJsonPath)
prepareJobData = JSON.parse(prepareJobJson.toString())
testTempOutput = new TestTempOutput()
testTempOutput.initialize()
prepareJobOutputFilePath = testTempOutput.createFile(
'prepare-job-output.json'
)
})
afterEach(async () => {
testTempOutput.cleanup()
})
it('should prepare job, run script step, run container step then cleanup without errors', async () => {
await expect(
prepareJob(prepareJobData.args, prepareJobOutputFilePath)
).resolves.not.toThrow()
const scriptStepContent = fs.readFileSync(runScriptStepJsonPath)
const scriptStepData = JSON.parse(scriptStepContent.toString())
const prepareJobOutputJson = fs.readFileSync(prepareJobOutputFilePath)
const prepareJobOutputData = JSON.parse(prepareJobOutputJson.toString())
await expect(
runScriptStep(scriptStepData.args, prepareJobOutputData.state, null)
).resolves.not.toThrow()
const runContainerStepContent = fs.readFileSync(runContainerStepJsonPath)
const runContainerStepData = JSON.parse(runContainerStepContent.toString())
await expect(
runContainerStep(runContainerStepData.args)
).resolves.not.toThrow()
await expect(cleanupJob()).resolves.not.toThrow()
})
})

View File

@@ -0,0 +1,47 @@
import * as fs from 'fs'
import * as path from 'path'
import { cleanupJob } from '../src/hooks'
import { prepareJob } from '../src/hooks/prepare-job'
import { TestTempOutput } from './test-setup'
// Integration test against real hooks; make sure jest fake timers are off.
jest.useRealTimers()
// Per-test temp-directory helper; re-created in beforeEach.
let testTempOutput: TestTempOutput
// Prepare-job fixture JSON checked into the repository's examples directory.
const prepareJobJsonPath = path.resolve(
  `${__dirname}/../../../examples/prepare-job.json`
)
// Parsed fixture and output-file path, populated in beforeEach.
let prepareJobData: any
let prepareJobOutputFilePath: string
describe('Prepare job', () => {
  beforeEach(() => {
    const prepareJobJson = fs.readFileSync(prepareJobJsonPath)
    prepareJobData = JSON.parse(prepareJobJson.toString())
    testTempOutput = new TestTempOutput()
    testTempOutput.initialize()
    prepareJobOutputFilePath = testTempOutput.createFile(
      'prepare-job-output.json'
    )
  })
  afterEach(async () => {
    // Tear down whatever prepareJob created, then drop the temp dir.
    // (Previously this also read and JSON.parsed the output file into unused
    // variables; on a failed test the file is empty and JSON.parse('') throws
    // here, masking the original failure.)
    await cleanupJob()
    testTempOutput.cleanup()
  })
  it('should not throw exception', async () => {
    await expect(
      prepareJob(prepareJobData.args, prepareJobOutputFilePath)
    ).resolves.not.toThrow()
  })
  it('should generate output file in JSON format', async () => {
    await prepareJob(prepareJobData.args, prepareJobOutputFilePath)
    const content = fs.readFileSync(prepareJobOutputFilePath)
    expect(() => JSON.parse(content.toString())).not.toThrow()
  })
})

View File

@@ -0,0 +1,27 @@
import { TestTempOutput } from './test-setup'
import * as path from 'path'
import { runContainerStep } from '../src/hooks'
import * as fs from 'fs'
// Integration test against real hooks; make sure jest fake timers are off.
jest.useRealTimers()
// Container-step fixture JSON checked into the repository's examples
// directory. Was declared with `let` but is never reassigned.
// (An unused `testTempOutput` declaration was removed — this file never
// creates temp output.)
const runContainerStepJsonPath = path.resolve(
  `${__dirname}/../../../examples/run-container-step.json`
)
// Parsed fixture, populated in beforeAll.
let runContainerStepData: any
describe('Run container step', () => {
  beforeAll(() => {
    // The hooks read RUNNER_NAME from the environment; provide one for the test.
    process.env.RUNNER_NAME = 'testjob'
    runContainerStepData = JSON.parse(
      fs.readFileSync(runContainerStepJsonPath).toString()
    )
  })
  it('should not throw', async () => {
    const stepArgs = runContainerStepData.args
    await expect(runContainerStep(stepArgs)).resolves.not.toThrow()
  })
})

View File

@@ -0,0 +1,61 @@
import { prepareJob, cleanupJob, runScriptStep } from '../src/hooks'
import { TestTempOutput } from './test-setup'
import * as path from 'path'
import * as fs from 'fs'
// Integration test against real hooks; make sure jest fake timers are off.
jest.useRealTimers()
// Per-test temp-directory helper; re-created in beforeEach.
let testTempOutput: TestTempOutput
// Prepare-job fixture JSON checked into the repository's examples directory.
const prepareJobJsonPath = path.resolve(
  `${__dirname}/../../../examples/prepare-job.json`
)
// Parsed fixture, output-file path and parsed prepareJob output,
// all populated in beforeEach.
let prepareJobData: any
let prepareJobOutputFilePath: string
let prepareJobOutputData: any
// NOTE: To use this test, do kubectl apply -f podspec.yaml (from podspec examples)
// then change the name of the file to 'run-script-step-test.ts' and do
// npm run test run-script-step
describe('Run script step', () => {
  beforeEach(async () => {
    // Prepare a job first; the script step runs against the pod it creates.
    const prepareJobJson = fs.readFileSync(prepareJobJsonPath)
    prepareJobData = JSON.parse(prepareJobJson.toString())
    testTempOutput = new TestTempOutput()
    testTempOutput.initialize()
    prepareJobOutputFilePath = testTempOutput.createFile(
      'prepare-job-output.json'
    )
    await prepareJob(prepareJobData.args, prepareJobOutputFilePath)
    // Removed a leftover `console.log(prepareJobData)` debug statement.
    const outputContent = fs.readFileSync(prepareJobOutputFilePath)
    prepareJobOutputData = JSON.parse(outputContent.toString())
  })
  afterEach(async () => {
    await cleanupJob()
    testTempOutput.cleanup()
  })
  it('should not throw an exception', async () => {
    const args = {
      entryPointArgs: ['echo "test"'],
      entryPoint: '/bin/bash',
      environmentVariables: {
        NODE_ENV: 'development'
      },
      prependPath: ['/foo/bar', 'bar/foo'],
      workingDirectory: '/__w/thboop-test2/thboop-test2'
    }
    const state = {
      jobPod: prepareJobOutputData.state.jobPod
    }
    const responseFile = null
    await expect(
      runScriptStep(args, state, responseFile)
    ).resolves.not.toThrow()
  })
})

View File

@@ -0,0 +1,28 @@
import { randomUUID } from 'crypto'
import * as fs from 'fs'
import { v4 as uuidv4 } from 'uuid'
/**
 * Creates a unique temp directory per test run (under <module dir>/_temp)
 * and provides helpers to create and remove files inside it.
 */
export class TestTempOutput {
  // Absolute path of this instance's unique temp directory.
  private tempDirPath: string

  constructor() {
    // crypto.randomUUID (Node >= 14.17) replaces the third-party uuid package.
    this.tempDirPath = `${__dirname}/_temp/${randomUUID()}`
  }

  /** Creates the temp directory, including any missing parents. */
  public initialize(): void {
    fs.mkdirSync(this.tempDirPath, { recursive: true })
  }

  /**
   * Removes the temp directory and all of its contents.
   * `force: true` makes this a no-op (instead of throwing ENOENT) when the
   * directory was already removed, so double-cleanup in tests is safe.
   */
  public cleanup(): void {
    fs.rmSync(this.tempDirPath, { recursive: true, force: true })
  }

  /**
   * Creates an empty file in the temp directory and returns its full path.
   * @param fileName optional name; a random UUID is used when omitted
   */
  public createFile(fileName?: string): string {
    const filePath = `${this.tempDirPath}/${fileName || randomUUID()}`
    fs.writeFileSync(filePath, '')
    return filePath
  }

  /** Deletes a previously created file by name; throws if it does not exist. */
  public removeFile(fileName: string): void {
    const filePath = `${this.tempDirPath}/${fileName}`
    fs.rmSync(filePath)
  }
}

View File

@@ -0,0 +1,11 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"baseUrl": "./",
"outDir": "./lib",
"rootDir": "./src"
},
"include": [
"./src"
]
}