Compare commits


1 Commit

Author: Yashwanth Anantharaju | SHA1: 22df069d5a | Message: dump message | Date: 2021-06-07 09:53:09 -04:00
120 changed files with 2808 additions and 3308 deletions


@@ -1,8 +0,0 @@
# https://editorconfig.org/
[*]
insert_final_newline = true # ensure all files end with a single newline
trim_trailing_whitespace = true # attempt to remove trailing whitespace on save
[*.md]
trim_trailing_whitespace = false # in markdown, "two trailing spaces" is unfortunately meaningful; it means `<br>`

.gitattributes

@@ -20,7 +20,7 @@
#
# Merging from the command prompt will add diff markers to the files if there
# are conflicts (Merging from VS is not affected by the settings below, in VS
# the diff markers are never inserted). Diff markers may cause the following
# file extensions to fail to load in VS. An alternative would be to treat
# these files as binary and thus will always conflict and require user
# intervention with every merge. To do so, just uncomment the entries below
@@ -70,9 +70,9 @@
###############################################################################
# diff behavior for common document formats
#
# Convert binary document formats to text before diffing them. This feature
# is only available from the command line. Turn it on by uncommenting the
# entries below.
###############################################################################
*.doc diff=astextplain


@@ -24,4 +24,4 @@ If applicable, add a code snippet.
**Additional information**
Add any other context about the feature here.
NOTE: if the feature request has been agreed upon then the assignee will create an ADR. See docs/adrs/README.md


@@ -7,12 +7,12 @@ on:
- main
- releases/*
paths-ignore:
- '**.md'
pull_request:
branches:
- '*'
paths-ignore:
- '**.md'
jobs:
build:
@@ -37,7 +37,7 @@ jobs:
devScript: ./dev.sh
- runtime: win-x64
os: windows-2019
os: windows-latest
devScript: ./dev
runs-on: ${{ matrix.os }}


@@ -28,7 +28,7 @@ jobs:
# languages: go, javascript, csharp, python, cpp, java
- name: Manual build
run : |
./dev.sh layout Release linux-x64
working-directory: src

.github/workflows/e2etest.yml (new file)

@@ -0,0 +1,335 @@
name: Runner E2E Test
on:
workflow_dispatch:
push:
branches:
- main
- releases/*
jobs:
init:
name: Initialize workflow ☕
runs-on: ubuntu-latest
outputs:
unique_runner_label: ${{steps.generator.outputs.runner_label}}
steps:
- name: Delete all runners
uses: actions/github-script@v3
with:
debug: true
script: |
var runnersResp = await github.actions.listSelfHostedRunnersForRepo({
owner: 'actions',
repo: 'runner',
per_page: '100'
});
for(var i=0; i<runnersResp.data.total_count; i++){
core.debug(JSON.stringify(runnersResp.data.runners[i]))
await github.actions.deleteSelfHostedRunnerFromRepo({
owner: 'actions',
repo: 'runner',
runner_id: runnersResp.data.runners[i].id
});
}
github-token: ${{secrets.PAT}}
- name: Generate Unique Runner label
id: generator
run: |
label=$(openssl rand -hex 16)
echo ::set-output name=runner_label::$label
build:
name: Build runner packages 🏗 📦
strategy:
matrix:
runtime: [ linux-x64, linux-arm64, linux-arm, win-x64, osx-x64 ]
include:
- runtime: linux-x64
os: ubuntu-latest
devScript: ./dev.sh
- runtime: linux-arm64
os: ubuntu-latest
devScript: ./dev.sh
- runtime: linux-arm
os: ubuntu-latest
devScript: ./dev.sh
- runtime: osx-x64
os: macOS-latest
devScript: ./dev.sh
- runtime: win-x64
os: windows-latest
devScript: ./dev
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v1
# Build runner layout
- name: Build & Layout Release
run: |
${{ matrix.devScript }} layout Release ${{ matrix.runtime }}
working-directory: src
# Create runner package tar.gz/zip
- name: Package Release
run: |
${{ matrix.devScript }} package Release ${{ matrix.runtime }}
working-directory: src
# Upload runner package tar.gz/zip as artifact
- name: Publish Artifact
uses: actions/upload-artifact@v1
with:
name: runner-package-${{ matrix.runtime }}
path: _package
dispatch_workflow:
name: Dispatch workflow to runners 🚨
needs: [init, build]
runs-on: ubuntu-latest
steps:
- name: Dispatch workflow
timeout-minutes: 10
uses: actions/github-script@v3
with:
debug: true
script: |
function sleep(ms) { return new Promise(resolve => setTimeout(resolve, ms)); }
async function dispatchWorkflow(runner) {
await github.actions.createWorkflowDispatch({
owner: 'actions',
repo: 'runner',
workflow_id: 'runner-basic-e2e-test-case.yml',
ref: 'main',
inputs: {target_runner: runner}
});
}
var runWin64 = false, runLinux64 = false, runOsx64 = false, runLinuxARM64 = false;
while (true) {
core.info(`------------- Waiting for runners to be configured --------------`)
await sleep(10000);
var runnersResp = await github.actions.listSelfHostedRunnersForRepo({owner: 'actions', repo: 'runner', per_page: '100'});
for (var i = 0; i < runnersResp.data.total_count; i++) {
core.debug(JSON.stringify(runnersResp.data.runners[i]))
var labels = runnersResp.data.runners[i].labels;
for (var j = 0; j < labels.length; j++) {
core.debug(`Comparing: ${labels[j].name} to win-x64/linux-x64/osx-x64/linux-arm64-${{ needs.init.outputs.unique_runner_label }}`)
if (labels[j].name == 'win-x64-${{needs.init.outputs.unique_runner_label}}' && runWin64 == false) {
core.info(`------------------- Windows runner is configured, queue Windows Run -------------------------`)
runWin64 = true;
await dispatchWorkflow('win-x64-${{needs.init.outputs.unique_runner_label}}');
break;
} else if (labels[j].name == 'linux-x64-${{needs.init.outputs.unique_runner_label}}' && runLinux64 == false) {
core.info(`------------------- Linux runner is configured, queue Linux Run -------------------------`)
runLinux64 = true;
await dispatchWorkflow('linux-x64-${{needs.init.outputs.unique_runner_label}}');
break;
} else if (labels[j].name == 'osx-x64-${{needs.init.outputs.unique_runner_label}}' && runOsx64 == false) {
core.info(`------------------- macOS runner is configured, queue macOS Run -------------------------`)
runOsx64 = true;
await dispatchWorkflow('osx-x64-${{needs.init.outputs.unique_runner_label}}');
break;
} else if (labels[j].name == 'linux-arm64-${{needs.init.outputs.unique_runner_label}}' && runLinuxARM64 == false) {
core.info(`------------------- Linux ARM64 runner is configured, queue Linux ARM64 Run-------------------------`)
runLinuxARM64 = true;
await dispatchWorkflow('linux-arm64-${{needs.init.outputs.unique_runner_label}}');
break;
}
}
}
if (runWin64 && runLinux64 && runOsx64 && runLinuxARM64) {
core.info(`--------------------- ALL runner are running jobs --------------------------`)
break;
} else {
core.info(`---------- Windows running: ${runWin64} -- Linux running: ${runLinux64} -- macOS running: ${runOsx64} -- Linux ARM64 running: ${runLinuxARM64} -----------`)
}
}
github-token: ${{secrets.PAT}}
LinuxE2E:
needs: [build, init]
runs-on: ubuntu-latest
steps:
- name: Download Runner
uses: actions/download-artifact@v2
with:
name: runner-package-linux-x64
- name: Unzip Runner Package
run: |
tar -xzf *.tar.gz
- name: Configure Runner
env:
unique_runner_name: linux-x64-${{needs.init.outputs.unique_runner_label}}
run: |
./config.sh --url ${{github.event.repository.html_url}} --unattended --name $unique_runner_name --pat ${{secrets.PAT}} --labels $unique_runner_name --replace
- name: Start Runner and Wait for Job
timeout-minutes: 5
run: |
./run.sh --once
- name: Remove Runner
if: always()
continue-on-error: true
run: |
./config.sh remove --pat ${{secrets.PAT}}
- name: Upload Runner Logs
if: always()
uses: actions/upload-artifact@v2
with:
name: linux_x64_logs
path: _diag
macOSE2E:
needs: [build, init]
runs-on: macos-latest
steps:
- name: Download Runner
uses: actions/download-artifact@v2
with:
name: runner-package-osx-x64
- name: Unzip Runner Package
run: |
tar -xzf *.tar.gz
- name: Configure Runner
env:
unique_runner_name: osx-x64-${{needs.init.outputs.unique_runner_label}}
run: |
./config.sh --url ${{github.event.repository.html_url}} --unattended --name $unique_runner_name --pat ${{secrets.PAT}} --labels $unique_runner_name --replace
- name: Start Runner and Wait for Job
timeout-minutes: 5
run: |
./run.sh --once
- name: Remove Runner
if: always()
continue-on-error: true
run: |
./config.sh remove --pat ${{secrets.PAT}}
- name: Upload Runner Logs
if: always()
uses: actions/upload-artifact@v2
with:
name: osx_x64_logs
path: _diag
ARM64E2E:
needs: [build, init]
runs-on: ubuntu-latest
steps:
- name: Download Runner
uses: actions/download-artifact@v2
with:
name: runner-package-linux-arm64
- name: Unzip Runner Package
run: |
tar -xzf *.tar.gz
- name: Prepare QEMU
run: |
docker run --rm --privileged multiarch/qemu-user-static:register --reset
- name: Configure Runner
uses: docker://multiarch/ubuntu-core:arm64-bionic
with:
args: 'bash -c "apt-get update && apt-get install -y curl && ./bin/installdependencies.sh && ./config.sh --unattended --name $unique_runner_name --url ${{github.event.repository.html_url}} --pat ${{secrets.PAT}} --labels $unique_runner_name --replace"'
env:
RUNNER_ALLOW_RUNASROOT: 1
unique_runner_name: linux-arm64-${{needs.init.outputs.unique_runner_label}}
- name: Start Runner and Wait for Job
timeout-minutes: 5
uses: docker://multiarch/ubuntu-core:arm64-bionic
with:
args: 'bash -c "apt-get update && apt-get install -y curl git && ./bin/installdependencies.sh && ./run.sh --once"'
env:
RUNNER_ALLOW_RUNASROOT: 1
- name: Remove Runner
if: always()
continue-on-error: true
uses: docker://multiarch/ubuntu-core:arm64-bionic
with:
args: 'bash -c "apt-get update && apt-get install -y curl && ./bin/installdependencies.sh && ./config.sh remove --pat ${{secrets.PAT}}"'
env:
RUNNER_ALLOW_RUNASROOT: 1
- name: Upload Runner Logs
if: always()
uses: actions/upload-artifact@v2
with:
name: linux_arm64_logs
path: _diag
WindowsE2E:
needs: [build, init]
runs-on: windows-latest
steps:
- name: Download Runner
uses: actions/download-artifact@v2
with:
name: runner-package-win-x64
- name: Unzip Runner Package
run: |
Get-ChildItem *.zip | Expand-Archive -DestinationPath $PWD
- name: Configure Runner
shell: cmd
run: |
config.cmd --unattended --url ${{github.event.repository.html_url}} --name %unique_runner_name% --pat ${{secrets.PAT}} --labels %unique_runner_name% --replace
env:
unique_runner_name: win-x64-${{needs.init.outputs.unique_runner_label}}
- name: Start Runner and Wait for Job
shell: cmd
timeout-minutes: 5
run: |
run.cmd --once
- name: Remove Runner
shell: cmd
if: always()
continue-on-error: true
run: |
config.cmd remove --pat ${{secrets.PAT}}
- name: Upload Runner Logs
if: always()
uses: actions/upload-artifact@v2
with:
name: win_x64_logs
path: _diag
check:
name: Check runner logs 🕵️‍♂️
needs: [WindowsE2E, LinuxE2E, macOSE2E, ARM64E2E]
runs-on: ubuntu-latest
steps:
- name: Download Linux Runner Logs
uses: actions/download-artifact@v2
with:
name: linux_x64_logs
path: linux_x64_logs
- name: Download macOS Runner Logs
uses: actions/download-artifact@v2
with:
name: osx_x64_logs
path: osx_x64_logs
- name: Download Linux ARM64 Runner Logs
uses: actions/download-artifact@v2
with:
name: linux_arm64_logs
path: linux_arm64_logs
- name: Download Windows Runner Logs
uses: actions/download-artifact@v2
with:
name: win_x64_logs
path: win_x64_logs
- name: Check Runner Logs
run: |
function failed()
{
local error=${1:-Undefined error}
echo "Failed: $error" >&2
exit 1
}
grep -R "completed with result: Succeeded" ./win_x64_logs || failed "Windows Runner fail to run the job, please check logs"
grep -R "completed with result: Succeeded" ./linux_x64_logs || failed "Linux Runner fail to run the job, please check logs"
grep -R "completed with result: Succeeded" ./osx_x64_logs || failed "macOS Runner fail to run the job, please check logs"
grep -R "completed with result: Succeeded" ./linux_arm64_logs || failed "Linux ARM64 Runner fail to run the job, please check logs"


@@ -5,7 +5,7 @@ on:
push:
paths:
- releaseVersion
jobs:
check:
if: startsWith(github.ref, 'refs/heads/releases/') || github.ref == 'refs/heads/main'
@@ -13,8 +13,8 @@ jobs:
steps:
- uses: actions/checkout@v2
# Make sure ./releaseVersion match ./src/runnerversion
# Query GitHub release ensure version is not used
- name: Check version
uses: actions/github-script@0.3.0
with:
@@ -42,7 +42,7 @@ jobs:
throw e
}
}
build:
needs: check
outputs:
@@ -72,7 +72,7 @@ jobs:
devScript: ./dev.sh
- runtime: win-x64
os: windows-2019
os: windows-latest
devScript: ./dev
runs-on: ${{ matrix.os }}
@@ -152,7 +152,7 @@ jobs:
releaseNote = releaseNote.replace(/<LINUX_ARM64_SHA>/g, '${{needs.build.outputs.linux-arm64-sha}}')
console.log(releaseNote)
core.setOutput('version', runnerVersion);
core.setOutput('note', releaseNote);
# Create GitHub release
- uses: actions/create-release@master
id: createRelease
@@ -164,7 +164,6 @@ jobs:
release_name: "v${{ steps.releaseNote.outputs.version }}"
body: |
${{ steps.releaseNote.outputs.note }}
prerelease: true
# Upload release assets
- name: Upload Release Asset (win-x64)


@@ -0,0 +1,31 @@
name: Runner Basics Test Case
on:
workflow_dispatch:
inputs:
target_runner:
description: 'Self-hosted runner will run the job'
required: true
jobs:
test:
runs-on:
- self-hosted
- ${{github.event.inputs.target_runner}}
name: Runner Basic Test 🛠
steps:
- uses: actions/checkout@v2
- name: Run a one-line script
run: echo Hello, world!
- name: Run a multi-line script
shell: bash
run: |
printenv|sort
cat $GITHUB_EVENT_PATH
- name: Validate GitHub Context
shell: bash
run: |
declare -a context_vars=("GITHUB_ACTION" "GITHUB_ACTIONS" "GITHUB_REPOSITORY" "GITHUB_WORKSPACE" "GITHUB_SHA" "GITHUB_RUN_ID" "GITHUB_RUN_NUMBER")
for var in ${context_vars[@]};
do [ -z "${!var}" ] && echo "##[error]$var not found" && exit 1 || echo "$var: ${!var}"; done

.vscode/launch.json

@@ -54,4 +54,4 @@
"requireExactSource": false,
},
],
}


@@ -5,7 +5,7 @@
# GitHub Actions Runner
[![Actions Status](https://github.com/actions/runner/workflows/Runner%20CI/badge.svg)](https://github.com/actions/runner/actions)
[![Runner E2E Test](https://github.com/actions-canary/actions-runner-e2e/actions/workflows/runner_e2etest.yml/badge.svg)](https://github.com/actions-canary/actions-runner-e2e/actions/workflows/runner_e2etest.yml)
[![Runner E2E Test](https://github.com/actions/runner/workflows/Runner%20E2E%20Test/badge.svg)](https://github.com/actions/runner/actions)
The runner is the application that runs a job from a GitHub Actions workflow. It is used by GitHub Actions in the [hosted virtual environments](https://github.com/actions/virtual-environments), or you can [self-host the runner](https://help.github.com/en/actions/automating-your-workflow-with-github-actions/about-self-hosted-runners) in your own environment.


@@ -1,92 +0,0 @@
**Date**: 2021-06-10
**Status**: Accepted
## Context
We released [composite run steps](https://github.com/actions/runner/pull/554) last year which started our journey of reusing steps across different workflow files. To continue that journey, we want to expand composite run steps into composite actions.
We want to support the `uses` steps from workflows in composite actions, including:
- Container actions
- Javascript actions
- Other Composite actions (up to a limit of course!)
- The pre and post steps these actions can generate
## Guiding Principles
- Composite Actions should function as a single step or action, no matter how many steps it is composed of or how many levels of recursion it has
- In the future we may add a configurable option to make this no longer the case
- A workflow author should not need to understand the inner workings of a composite action in order to use it
- Composite actions should leverage inputs to get the values they need; they will not have full access to the `context` objects. The secrets context will **not** be available to composite actions, so users will need to pass these values in as inputs (see the sketch after this list).
- Other Actions should **just work** inside a composite action, without any code changes
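To make the inputs-only interface concrete, here is a minimal sketch of a composite action that nests a `uses` step and receives a secret through an input. The action, input, and env names are illustrative assumptions, not part of this ADR.
```yaml
# action.yml -- hypothetical composite action; all names are illustrative
name: 'example-composite'
description: 'Sketch of a composite action that nests a uses step'
inputs:
  api-token:
    description: 'The secrets context is not available here, so the caller passes the value in'
    required: true
runs:
  using: 'composite'
  steps:
    # A `uses` step -- the capability this ADR adds to composite actions
    - uses: actions/checkout@v2
    # Run steps inside a composite action must declare `shell` explicitly
    - run: echo "token length: ${#API_TOKEN}"
      shell: bash
      env:
        API_TOKEN: ${{ inputs.api-token }}
```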
## Decisions
### Composite Recursion Limit
- We will start by supporting a recursion limit of `10` composite actions deep
- We are free to bump this limit in the future; the code will be written so that only a variable needs updating. If the graph evaluates beyond the recursion limit, the job will fail in the pre-job phase (the `Set up job` step).
- A composite action's interface is its inputs and outputs; nothing else is carried over when invoking recursively (see the sketch below).
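As a sketch of that interface (the action and input names are hypothetical), a composite action that nests another composite action can only pass data in through `with:` and surface data back through declared outputs:
```yaml
# action.yml -- hypothetical outer composite action calling a nested composite action
name: 'outer-composite'
inputs:
  message:
    required: true
outputs:
  result:
    # Only explicitly mapped outputs cross the boundary back to the caller
    value: ${{ steps.inner.outputs.result }}
runs:
  using: 'composite'
  steps:
    - id: inner
      uses: my-org/inner-composite@v1   # hypothetical nested composite action
      with:
        message: ${{ inputs.message }}
```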
### Pre/Post Steps in nested Actions
- We do not plan on adding the ability to configure a customizable pre or post step for composite actions at this time. However, we will execute the pre and post steps of any actions referenced in a composite action.
- Composite actions will generate a single pre-step and post-step for the entire composite action, even if there are multiple pre-steps and post-steps in the referenced actions.
- These steps will execute following the same ordering rules we have today: the first action to run has its pre step run first and its post step run last.
- For example, if you had a composite action with two pre steps and two post steps:
```
- uses: action1
- uses: composite1
- uses: action2
```
The order of execution would be:
```
- prestep-action1
- prestep-composite1
- prestep-composite1-first-action-referenced
- prestep-composite1-second-action-referenced
- prestep-action2
- the job steps
- poststep-action2
- poststep-composite1
- poststep-composite1-the-second-action-referenced
- poststep-composite1-first-action-referenced
- poststep-action1
```
#### Set-state
- While the composite action has a single combined pre step and post step, the `set-state` command will not be shared.
- If the `set-state` command is used during a composite step, only the action that originally called `set-state` will have access to the env variable during the post run step.
- This prevents multiple actions that set the same state from interfering with the execution of another action's post step.
### Resolve Action Endpoint changes
- The resolve actions endpoint will now validate policy to ensure that the given workflow run has access to download that action.
- Older GHES/GHAE customers with newer runners will be locked out of composite uses steps until they upgrade their instance.
### Local actions
- Local actions will expand the tree, perform policy checks, and download actions Just in Time when the step is running.
- Like current local actions, we will not support pre steps. If an action is run locally, by the time we know that, the time to run pre steps has already passed.
### If, continue-on-error, timeout-minutes - Not being considered at this time
- `if`, `continue-on-error`, `timeout-minutes` could be supported in composite run/uses steps. These values were not originally supported in our composite run steps implementation.
- Browsing the community forums and runner repo, there hasn't been a lot of noise asking for these features, so we will hold off on them.
- These values passed as input into the composite action will **not** be carried over as input into the individual steps the composite action runs.
### Defaults - Not being considered at this time
- In actions, we have the idea of [defaults](https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#defaultsrun), which allow you to specify a shell and working directory in one location, rather than on each step.
- However, `shell` is currently required in composite run steps
- In regular run steps, it is optional, and defaults to a different value based on the OS.
- We want to prioritize the right experience for the consumer, and make the action author continue to explicitly set these values. We can consider improving this experience in the future.
## Consequences
- Workflows are now more reusable across multiple workflow files (see the workflow sketch below)
- Composite actions implement most of the existing workflow run steps, with room to expand these in the future
- Feature flags will control this rollout
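For illustration, a workflow that reuses a composite action like the one sketched earlier would forward its secret explicitly as an input; the repository, action, and secret names below are assumptions.
```yaml
# Hypothetical workflow consuming a composite action
name: use-composite-example
on: push
jobs:
  demo:
    runs-on: ubuntu-latest
    steps:
      - uses: my-org/example-composite@v1      # hypothetical action reference
        with:
          api-token: ${{ secrets.API_TOKEN }}  # secrets must be forwarded as inputs
```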


@@ -1,71 +0,0 @@
# ADR 1438: Support Conditionals In Composite Actions
**Date**: 2021-10-13
**Status**: Accepted
## Context
We recently shipped composite actions, which allow you to reuse individual steps inside an action.
However, one of the [most requested features](https://github.com/actions/runner/issues/834) has been a way to support the `if` keyword.
### Goals
- We want to keep consistent with current behavior
- We want to support conditionals via the `if` keyword
- Our built-in functions like `success()` should be implementable without calling them; for example, you can currently do `job.status == success` rather than `success()`.
### How does composite currently work?
Currently, we have limited conditional support in composite actions for `pre` and `post` steps.
These are based on the `job status`, and support keywords like `always()`, `failed()`, `success()` and `cancelled()`.
However, generic or main steps do **not** support conditionals.
By default, in a regular workflow, a step runs on the `success()` condition, which looks at the **job status** and runs the step only if the job is successful.
By default, in a composite action, main steps run until a single step fails in that composite action, at which point the composite action is halted early. It does **not** care about the job status.
Pre and post steps in composite actions use the job status to determine whether they should run.
### How do we go forward?
Well, if we think about what composite actions are currently doing when invoking main steps, they are checking whether the current composite action is successful.
Let's formalize that concept into a "real" idea.
- We will add an `action_status` field to the github context to mimic the [job's context status](https://docs.github.com/en/actions/learn-github-actions/contexts#job-context).
- We have an existing precedent for this: `action_path`, which is only set on the github context for composite actions.
- In a composite action during a main step, the `success()` function will check whether `action_status == success`, rather than `job_status == success`. Failure will work the same way.
- Pre and post steps in composite actions will not change; they will continue to check the job status (see the sketch below).
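A minimal sketch of what this looks like inside a composite action's main steps, assuming the `action_status` behavior described above (step contents are illustrative):
```yaml
# Fragment of a hypothetical action.yml showing `if` evaluated against action_status
runs:
  using: 'composite'
  steps:
    - run: exit 1          # fails, so action_status is no longer success
      shell: bash
    - if: success()        # checks the composite action's status, so this step is skipped
      run: echo "does not run"
      shell: bash
    - if: always()         # still runs even though this composite action has failed
      run: echo "always runs"
      shell: bash
```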
### Nested Scenario
For nested composite actions, we will follow the existing behavior: you only care about your current composite action, not any parents.
For example, let's imagine a scenario with a simple nested composite action:
```
- Job
- Regular Step
- Composite Action
- runs: exit 1
- if: always()
uses: A child composite action
- if: success()
runs: echo "this should print"
- runs: echo "this should also print"
- if: success()
runs: echo "this will not print as the current composite action has failed already"
```
The child composite action's steps should run in this example: the child composite action has not yet failed, so it should run all steps until a step fails. This is consistent with how a composite action currently works in production if the main job fails but a composite action is invoked with `if: always()` or `if: failure()`.
### Other options explored
We could add a `current_step_status` to the job context rather than a `__status` on the steps context; however, this comes with two major downsides:
- We need to support the field for every type of step, because it's non-trivial to remove a field from the job context once it has been added (it's read-only)
- For all actions besides composite it would only ever be `success`
- It's weird to have a `current_step` value on the job context
- We also explored a `__status` on the steps context.
- The `__` is required to prevent us from colliding with a step with `id: status`
- This felt wrong because the naming was not smooth and did not fit into current conventions.
### Consequences
- github context has a new field for the status of the current composite action.
- We support conditionals in composite actions
- We keep the existing behavior for all users, but allow them to expand that functionality.


@@ -21,33 +21,16 @@ export RUNNER_CFG_PAT=yourPAT
:point_right: [Sample script here](../scripts/create-latest-svc.sh) :point_left:
Run as a one-liner. NOTE: replace with yourorg/yourrepo (repo level) or just yourorg (org level)
```bash
curl -s https://raw.githubusercontent.com/actions/runner/main/scripts/create-latest-svc.sh | bash -s yourorg/yourrepo
```
You can call the script with additional arguments:
```bash
# Usage:
# export RUNNER_CFG_PAT=<yourPAT>
# ./create-latest-svc -s scope -g [ghe_domain] -n [name] -u [user] -l [labels]
# -s required scope: repo (:owner/:repo) or org (:organization)
# -g optional ghe_hostname: the fully qualified domain name of your GitHub Enterprise Server deployment
# -n optional name of the runner, defaults to hostname
# -u optional user svc will run as, defaults to current
# -l optional list of labels (split by comma) applied on the runner"
```
Use `--` to pass any number of optional named parameters:
```
curl -s https://raw.githubusercontent.com/actions/runner/main/scripts/create-latest-svc.sh | bash -s -- -s myorg/myrepo -n myname -l label1,label2
```
### Why can't I use a container?
The runner is installed as a service using `systemd` and `systemctl`. Docker does not support `systemd` for service configuration on a container.
## Uninstall running as service
**Scenario**: Run on a machine or VM ([not container](#why-cant-i-use-a-container)) which automates:
@@ -57,7 +40,7 @@ The runner is installed as a service using `systemd` and `systemctl`. Docker doe
:point_right: [Sample script here](../scripts/remove-svc.sh) :point_left:
Repo-level one-liner. NOTE: replace with yourorg/yourrepo (repo level) or just yourorg (org level)
```bash
curl -s https://raw.githubusercontent.com/actions/runner/main/scripts/remove-svc.sh | bash -s yourorg/yourrepo
```


@@ -18,16 +18,16 @@ Make sure the runner has access to actions service for GitHub.com or GitHub Ente
- DNS lookup for api.github.com or myGHES.com using dotnet
- Ping api.github.com or myGHES.com using dotnet
- Make an HTTP GET to https://api.github.com or https://myGHES.com/api/v3 using dotnet and check that the response headers contain `X-GitHub-Request-Id`
---
- DNS lookup for vstoken.actions.githubusercontent.com using dotnet
- Ping vstoken.actions.githubusercontent.com using dotnet
- Make an HTTP GET to https://vstoken.actions.githubusercontent.com/_apis/health or https://myGHES.com/_services/vstoken/_apis/health using dotnet and check that the response headers contain `x-vss-e2eid`
---
- DNS lookup for pipelines.actions.githubusercontent.com using dotnet
- Ping pipelines.actions.githubusercontent.com using dotnet
- Make an HTTP GET to https://pipelines.actions.githubusercontent.com/_apis/health or https://myGHES.com/_services/pipelines/_apis/health using dotnet and check that the response headers contain `x-vss-e2eid`
- Make an HTTP POST to https://pipelines.actions.githubusercontent.com/_apis/health or https://myGHES.com/_services/pipelines/_apis/health using dotnet and check that the response headers contain `x-vss-e2eid`
## How to fix the issue?
@@ -42,4 +42,4 @@ Make sure the runner has access to actions service for GitHub.com or GitHub Ente
## Still not working?
Contact GitHub customer service or log an issue at https://github.com/actions/runner if you think it's a runner issue.


@@ -31,4 +31,4 @@ The test also set environment variable `GIT_TRACE=1` and `GIT_CURL_VERBOSE=1` be
## Still not working?
Contact GitHub customer service or log an issue at https://github.com/actions/runner if you think it's a runner issue.


@@ -13,7 +13,7 @@ Even the runner is configured to GitHub Enterprise Server, the runner can still
- DNS lookup for api.github.com using dotnet
- Ping api.github.com using dotnet
- Make an HTTP GET to https://api.github.com using dotnet and check that the response headers contain `X-GitHub-Request-Id`
## How to fix the issue?
@@ -23,4 +23,4 @@ Even the runner is configured to GitHub Enterprise Server, the runner can still
## Still not working?
Contact GitHub customer service or log an issue at https://github.com/actions/runner if you think it's a runner issue.


@@ -2,19 +2,17 @@
### Common things that can cause the runner to not work properly
- A bug in the runner or the dotnet framework that makes the actions runner unable to make HTTP requests in a certain network environment.
- A proxy or firewall may block certain HTTP methods, such as blocking all POST and PUT calls, which the runner uses to upload logs.
- A proxy or firewall may only allow requests with a certain user-agent to pass through, and the actions runner user-agent is not in the allow list.
- A proxy may try to decrypt and examine HTTPS traffic for security purposes, but cause the actions runner to fail the SSL handshake because the proxy's CA is not trusted.
- The SSL handshake may fail if the client and server do not support the same TLS version, or the same cipher suites.
- A proxy may try to modify the HTTPS request (e.g. add or change some HTTP headers) and cause the request to become incompatible with the Actions Service (ASP.NET Core), e.g. [Nginx](https://github.com/dotnet/aspnetcore/issues/17081)
- Firewall rules that block the actions runner from accessing certain hosts, e.g. `*.github.com`, `*.actions.githubusercontent.com`, etc.
### Identify and solve these problems
@@ -25,38 +23,10 @@ Use a 3rd party tool to make the same requests as the runner did would be a good
- Use `nslookup` to check DNS
- Use `ping` to check Ping
- Use `traceroute`, `tracepath`, or `tracert` to check the network route between the runner and the Actions service
- Use `curl -v` to check the network stack, good for verifying default certificate/proxy settings.
- Use `Invoke-WebRequest` from `pwsh` (`PowerShell Core`) to check the dotnet network stack, good for verifying bugs in the dotnet framework.
If the 3rd party tool is also experiencing the same error as the runner does, then you might want to contact your network administrator for help.
Otherwise, contact GitHub customer support or log an issue at https://github.com/actions/runner
### Troubleshooting: Why can't I configure a runner?
If you are having trouble connecting, try these steps:
1. Validate you can reach our endpoints from your web browser. If not, double check your local network connection
- For hosted Github:
- https://api.github.com/
- https://vstoken.actions.githubusercontent.com/_apis/health
- https://pipelines.actions.githubusercontent.com/_apis/health
- For GHES/GHAE
- https://myGHES.com/_services/vstoken/_apis/health
- https://myGHES.com/_services/pipelines/_apis/health
- https://myGHES.com/api/v3
2. Validate you can reach those endpoints in powershell core
- The runner runs on .net core, lets validate the local settings for that stack
- Open up `pwsh`
- Run the command using the urls above `Invoke-WebRequest {url}`
3. If not, get a packet trace using a tool like wireshark and start looking at the TLS handshake.
- If you see a Client Hello followed by a Server RST:
- You may need to configure your TLS settings to use the correct version
- You should support TLS version 1.2 or later
- You may need to configure your TLS settings to have up to date cipher suites, this may be solved by system updates and patches.
- Most notably, on windows server 2012 make sure [the tls cipher suite update](https://support.microsoft.com/en-us/topic/update-adds-new-tls-cipher-suites-and-changes-cipher-suite-priorities-in-windows-8-1-and-windows-server-2012-r2-8e395e43-c8ef-27d8-b60c-0fc57d526d94) is installed
- Your firewall, proxy or network configuration may be blocking the connection
- You will want to reach out to whoever is in charge of your network with these pcap files to further troubleshoot
- If you see a failure later in the handshake:
- Try the fix in the [SSLCert Fix](./sslcert.md)
Otherwise, contact GitHub customer support or log an issue at https://github.com/actions/runner


@@ -27,4 +27,4 @@ All javascript base Actions will get executed by the built-in `node` at `<runner
## Still not working?
Contact GitHub customer service or log an issue at https://github.com/actions/runner if you think it's a runner issue.


@@ -12,7 +12,7 @@ As long as your certificate is generated properly, most of the issues should be
> !!! DO NOT SKIP SSL CERT VALIDATION !!!
> !!! IT IS A BAD SECURITY PRACTICE !!!
### Download SSL certificate chain
Depending on how your SSL server certificate is configured, you might need to download the whole certificate chain from a machine that has trusted the SSL certificate's CA.
@@ -28,7 +28,7 @@ The actions runner is a dotnet core application which will follow how dotnet loa
You can find the full documentation [here](https://docs.microsoft.com/en-us/dotnet/standard/security/cross-platform-cryptography#x509store)
In short:
- Windows: Load from Windows certificate store.
- Linux: Load from OpenSSL CA cert bundle.
- macOS: Load from macOS KeyChain.
@@ -43,13 +43,13 @@ To let the runner trusts your CA certificate, you will need to:
1. RedHat: https://www.redhat.com/sysadmin/ca-certificates-cli
2. Ubuntu: http://manpages.ubuntu.com/manpages/focal/man8/update-ca-certificates.8.html
3. Google search: "trust ca certificate on [linux distribution]"
4. If all approaches fail, set the environment variable `SSL_CERT_FILE` to the CA bundle `.pem` file we downloaded.
> To verify the cert is installed properly on Linux, you can try `curl -v https://sitewithsslissue.com` and `pwsh -Command \"Invoke-WebRequest -Uri https://sitewithsslissue.com\"`
### Trust CA certificate for Git CLI
Git uses different CA bundle files depending on your operating system:
- On Windows, Git packages the CA bundle file within the Git installation
- On Linux and macOS, Git uses the OpenSSL CA certificate bundle file
You can check which CA bundle file Git uses by running:


@@ -12,7 +12,7 @@ Issues in this repository should be for the runner application. Note that the V
## Enhancements and Feature Requests
We ask that, before significant effort is put into code changes, we have agreement on taking the change.
1. Create a feature request. Once agreed we will take the enhancement
2. Create an ADR to agree on the details of the change.
@@ -46,9 +46,9 @@ Tip: Make sure your job can run on this runner. The easiest way is to set `runs-
## Development Life Cycle
If you're using VS Code, you can follow [these](contribute/vscode.md) steps instead.
### To Build, Test, Layout
Navigate to the `src` directory and run the following command:


@@ -4,7 +4,7 @@ These examples use VS Code, but the idea should be similar across all IDEs as lo
## Configure
To successfully start the runner, you need to register it using a repository and a runner registration token.
Run `Configure` first to build the source code and set up the runner in `_layout`.
Once it's done creating `_layout`, it asks for the url of your repository and your token in the terminal.
Check [Quickstart](../contribute.md#quickstart-run-a-job-from-a-real-repository) if you don't know how to get this token.
@@ -34,7 +34,7 @@ All the configs below can be found in `.vscode/launch.json`.
If you launch `Run` or `Run [build]`, it starts a process called `Runner.Listener`.
This process will receive any job queued on this repository if the job runs on matching labels (e.g `runs-on: self-hosted`).
Once a job is received, a `Runner.Listener` starts a new process of `Runner.Worker`.
Since this is a different process, you can't use the same debugger session to debug it.
Instead, a parallel debugging session has to be started, using a different launch config.
Luckily, VS Code supports multiple parallel debugging sessions.
@@ -45,7 +45,7 @@ Because the worker process is usually started by the listener instead of an IDE,
For this reason, `Runner.Worker` can be configured to wait for a debugger to be attached before it begins any actual work.
Set the environment variable `GITHUB_ACTIONS_RUNNER_ATTACH_DEBUGGER` to `true` or `1` to enable this wait.
All worker processes now will wait 20 seconds before they start working on their task.
This gives enough time to attach a debugger by running `Debug Worker`.
If for some reason you have multiple workers running, run the launch config `Attach` instead.


@@ -58,4 +58,4 @@ Authentication in a workflow run to github.com can be accomplished by using the
Hosted runner authentication differs from self-hosted authentication in that runners do not undergo a registration process, but instead, the hosted runners get the OAuth token directly by reading the `.credentials` file. The scope of this particular token is limited for a given workflow job execution, and the token is revoked as soon as the job is finished.
![Hosted runner config and start](../res/hosted-config-start.png)


@@ -27,7 +27,7 @@ Dependencies is missing for Dotnet Core 3.0
Execute ./bin/installdependencies.sh to install any missing Dotnet Core 3.0 dependencies.
```
You can easily correct the problem by executing `./bin/installdependencies.sh`.
The `installdependencies.sh` script should install all required dependencies on all supported Linux versions
> Note: The `installdependencies.sh` script will try to use the default package management mechanism on your Linux flavor (ex. `yum`/`apt-get`/`apt`).
### Full dependencies list
@@ -35,15 +35,15 @@ The `installdependencies.sh` script should install all required dependencies on
Debian based OS (Debian, Ubuntu, Linux Mint)
- liblttng-ust0
- libkrb5-3
- zlib1g
- libssl1.1, libssl1.0.2 or libssl1.0.0
- libicu63, libicu60, libicu57 or libicu55
Fedora based OS (Fedora, Red Hat Enterprise Linux, CentOS, Oracle Linux 7)
- lttng-ust
- openssl-libs
- krb5-libs
- zlib
- libicu


@@ -5,7 +5,7 @@
## Supported Versions
- macOS High Sierra (10.13) and later versions
## Apple Silicon M1
The runner is currently not supported on devices with an Apple M1 chip.


@@ -1,17 +1,17 @@
## Features
- n/a
- Use GITHUB_TOKEN for ghcr.io containers if credentials are not provided (#990)
## Bugs
- Fixed an issue where container environment variables names or values could escape the docker command (#2108)
- Sanitize Windows ENVs (#2280)
- Do not truncate error message from template evaluation (#1038)
- Make FileShare ReadWrite (#1033)
- Mask secrets with double-quotes when passed to docker command line (#1002)
- Delete script files before replacing during update (#984)
## Misc
- n/a
## Windows x64
We recommend configuring the runner in a root folder of the Windows drive (e.g. "C:\actions-runner"). This will help avoid issues related to service identity folder permissions and long file path restrictions on Windows.
@@ -23,7 +23,7 @@ mkdir \actions-runner ; cd \actions-runner
# Download the latest runner package
Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v<RUNNER_VERSION>/actions-runner-win-x64-<RUNNER_VERSION>.zip -OutFile actions-runner-win-x64-<RUNNER_VERSION>.zip
# Extract the installer
Add-Type -AssemblyName System.IO.Compression.FileSystem ;
[System.IO.Compression.ZipFile]::ExtractToDirectory("$PWD\actions-runner-win-x64-<RUNNER_VERSION>.zip", "$PWD")
```


@@ -1 +1 @@
2.285.3
<Update to ./src/runnerversion when creating release>


@@ -1,4 +1,4 @@
# Sample scripts for self-hosted runners
Here are some examples to work from if you'd like to automate your use of self-hosted runners.
See the docs [here](../docs/automate.md).


@@ -2,68 +2,36 @@
set -e
#
# Downloads latest releases (not pre-release) runner
# Configures as a service
#
# Examples:
# RUNNER_CFG_PAT=<yourPAT> ./create-latest-svc.sh myuser/myrepo my.ghe.deployment.net
# RUNNER_CFG_PAT=<yourPAT> ./create-latest-svc.sh myorg my.ghe.deployment.net
#
# Usage:
# export RUNNER_CFG_PAT=<yourPAT>
# ./create-latest-svc scope [ghe_domain] [name] [user] [labels]
#
# scope required repo (:owner/:repo) or org (:organization)
# ghe_domain optional the fully qualified domain name of your GitHub Enterprise Server deployment
# name optional defaults to hostname
# user optional user svc will run as. defaults to current
# labels optional list of labels (split by comma) applied on the runner
#
# Notes:
# PATS over envvars are more secure
# Downloads latest runner release (not pre-release)
# Configures it as a service more secure
# Should be used on VMs and not containers
# Works on OSX and Linux
# Assumes x64 arch
# See EXAMPLES below
#
flags_found=false
while getopts 's:g:n:u:l:' opt; do
flags_found=true
case $opt in
s)
runner_scope=$OPTARG
;;
g)
ghe_hostname=$OPTARG
;;
n)
runner_name=$OPTARG
;;
u)
svc_user=$OPTARG
;;
l)
labels=$OPTARG
;;
*)
echo "
Runner Service Installer
Examples:
RUNNER_CFG_PAT=<yourPAT> ./create-latest-svc.sh myuser/myrepo my.ghe.deployment.net
RUNNER_CFG_PAT=<yourPAT> ./create-latest-svc.sh -s myorg -u user_name -l label1,label2
Usage:
export RUNNER_CFG_PAT=<yourPAT>
./create-latest-svc scope [ghe_domain] [name] [user] [labels]
-s required scope: repo (:owner/:repo) or org (:organization)
-g optional ghe_hostname: the fully qualified domain name of your GitHub Enterprise Server deployment
-n optional name of the runner, defaults to hostname
-u optional user svc will run as, defaults to current
-l optional list of labels (split by comma) applied on the runner"
exit 0
;;
esac
done
shift "$((OPTIND - 1))"
if ! "$flags_found"; then
runner_scope=${1}
ghe_hostname=${2}
runner_name=${3:-$(hostname)}
svc_user=${4:-$USER}
labels=${5}
fi
# apply defaults
runner_name=${runner_name:-$(hostname)}
svc_user=${svc_user:-$USER}
runner_scope=${1}
ghe_hostname=${2}
runner_name=${3:-$(hostname)}
svc_user=${4:-$USER}
labels=${5}
echo "Configuring runner @ ${runner_scope}"
sudo echo
@@ -174,7 +142,7 @@ echo
echo "Configuring as a service ..."
prefix=""
if [ "${runner_plat}" == "linux" ]; then
prefix="sudo "
prefix="sudo "
fi
${prefix}./svc.sh install ${svc_user}


@@ -51,7 +51,7 @@ fi
# Ensure offline
#--------------------------------------
runner_status=$(curl -s -X GET ${base_api_url}/${runner_scope}/actions/runners?per_page=100 -H "accept: application/vnd.github.everest-preview+json" -H "authorization: token ${RUNNER_CFG_PAT}" \
| jq -M -j ".runners | .[] | select(.name == \"${runner_name}\") | .status")
| jq -M -j ".runners | .[] | [select(.name == \"${runner_name}\")] | .[0].status")
if [ -z "${runner_status}" ]; then
fatal "Could not find runner with name ${runner_name}"
@@ -67,7 +67,7 @@ fi
# Get id of runner to remove
#--------------------------------------
runner_id=$(curl -s -X GET ${base_api_url}/${runner_scope}/actions/runners?per_page=100 -H "accept: application/vnd.github.everest-preview+json" -H "authorization: token ${RUNNER_CFG_PAT}" \
| jq -M -j ".runners | .[] | select(.name == \"${runner_name}\") | .id")
| jq -M -j ".runners | .[] | [select(.name == \"${runner_name}\")] | .[0].id")
if [ -z "${runner_id}" ]; then
fatal "Could not find runner with name ${runner_name}"


@@ -1291,9 +1291,9 @@
}
},
"glob-parent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.1.tgz",
"integrity": "sha512-FnI+VGOpnlGHWZxthPGR+QhR78fuiK0sNLkHQv+bL9fQi57lNNdquIbna/WrfROrolq8GK5Ek6BiMwqL/voRYQ==",
"dev": true,
"requires": {
"is-glob": "^4.0.1"
@@ -1374,9 +1374,9 @@
"dev": true
},
"hosted-git-info": {
"version": "2.8.9",
"resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz",
"integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==",
"version": "2.8.8",
"resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.8.tgz",
"integrity": "sha512-f/wzC2QaWBs7t9IYqB4T3sR1xviIViXJRJTWBlx2Gf3g0Xi5vI7Yy4koXQ1c9OYDGHN9sBy1DQ2AB8fqZBWhUg==",
"dev": true
},
"iconv-lite": {
@@ -1683,9 +1683,9 @@
}
},
"lodash": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
"version": "4.17.19",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.19.tgz",
"integrity": "sha512-JNvd8XER9GQX0v2qJgsaN/mzFCNA5BRe/j8JN9d+tWyGLSodKQHKFicdwNYzWwI3wjRnaKPsGj1XkBjx/F96DQ==",
"dev": true
},
"lodash.unescape": {
@@ -1947,9 +1947,9 @@
"dev": true
},
"path-parse": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
"integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz",
"integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==",
"dev": true
},
"path-type": {


@@ -4,7 +4,6 @@ PRECACHE=$2
NODE_URL=https://nodejs.org/dist
NODE12_VERSION="12.13.1"
NODE16_VERSION="16.13.0"
get_abs_path() {
# exploits the fact that pwd will print abs path when no args
@@ -127,8 +126,6 @@ function acquireExternalTool() {
if [[ "$PACKAGERUNTIME" == "win-x64" || "$PACKAGERUNTIME" == "win-x86" ]]; then
acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/$PACKAGERUNTIME/node.exe" node12/bin
acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/$PACKAGERUNTIME/node.lib" node12/bin
acquireExternalTool "$NODE_URL/v${NODE16_VERSION}/$PACKAGERUNTIME/node.exe" node16/bin
acquireExternalTool "$NODE_URL/v${NODE16_VERSION}/$PACKAGERUNTIME/node.lib" node16/bin
if [[ "$PRECACHE" != "" ]]; then
acquireExternalTool "https://github.com/microsoft/vswhere/releases/download/2.6.7/vswhere.exe" vswhere
fi
@@ -137,23 +134,18 @@ fi
# Download the external tools only for OSX.
if [[ "$PACKAGERUNTIME" == "osx-x64" ]]; then
acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/node-v${NODE12_VERSION}-darwin-x64.tar.gz" node12 fix_nested_dir
acquireExternalTool "$NODE_URL/v${NODE16_VERSION}/node-v${NODE16_VERSION}-darwin-x64.tar.gz" node16 fix_nested_dir
fi
# Download the external tools for Linux PACKAGERUNTIMEs.
if [[ "$PACKAGERUNTIME" == "linux-x64" ]]; then
acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/node-v${NODE12_VERSION}-linux-x64.tar.gz" node12 fix_nested_dir
acquireExternalTool "https://vstsagenttools.blob.core.windows.net/tools/nodejs/${NODE12_VERSION}/alpine/x64/node-${NODE12_VERSION}-alpine-x64.tar.gz" node12_alpine
acquireExternalTool "$NODE_URL/v${NODE16_VERSION}/node-v${NODE16_VERSION}-linux-x64.tar.gz" node16 fix_nested_dir
acquireExternalTool "https://vstsagenttools.blob.core.windows.net/tools/nodejs/${NODE16_VERSION}/alpine/x64/node-v${NODE16_VERSION}-alpine-x64.tar.gz" node16_alpine
fi
if [[ "$PACKAGERUNTIME" == "linux-arm64" ]]; then
acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/node-v${NODE12_VERSION}-linux-arm64.tar.gz" node12 fix_nested_dir
acquireExternalTool "$NODE_URL/v${NODE16_VERSION}/node-v${NODE16_VERSION}-linux-arm64.tar.gz" node16 fix_nested_dir
fi
if [[ "$PACKAGERUNTIME" == "linux-arm" ]]; then
acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/node-v${NODE12_VERSION}-linux-armv7l.tar.gz" node12 fix_nested_dir
acquireExternalTool "$NODE_URL/v${NODE16_VERSION}/node-v${NODE16_VERSION}-linux-armv7l.tar.gz" node16 fix_nested_dir
fi


@@ -82,8 +82,7 @@ var gracefulShutdown = function (code) {
console.log('Sending SIGINT to runner listener to stop');
listener.kill('SIGINT');
console.log('Sending SIGKILL to runner listener');
setTimeout(() => listener.kill('SIGKILL'), 30000);
// TODO wait for 30 seconds and send a SIGKILL
}
}


@@ -25,7 +25,5 @@
</dict>
<key>ProcessType</key>
<string>Interactive</string>
<key>SessionCreate</key>
<true/>
</dict>
</plist>


@@ -94,6 +94,7 @@ then
fi
}
# libssl version prefer: libssl1.1 -> libssl1.0.2 -> libssl1.0.0
apt_get_with_fallbacks libssl1.1$ libssl1.0.2$ libssl1.0.0$
if [ $? -ne 0 ]
then
@@ -102,7 +103,8 @@ then
exit 1
fi
apt_get_with_fallbacks libicu72 libicu71 libicu70 libicu69 libicu68 libicu67 libicu66 libicu65 libicu63 libicu60 libicu57 libicu55 libicu52
# libicu version prefer: libicu66 -> libicu63 -> libicu60 -> libicu57 -> libicu55 -> libicu52
apt_get_with_fallbacks libicu66 libicu63 libicu60 libicu57 libicu55 libicu52
if [ $? -ne 0 ]
then
echo "'$apt_get' failed with exit code '$?'"


@@ -106,30 +106,18 @@ function stop()
function uninstall()
{
if service_exists; then
stop
systemctl disable ${SVC_NAME} || failed "failed to disable ${SVC_NAME}"
rm "${UNIT_PATH}" || failed "failed to delete ${UNIT_PATH}"
else
echo "Service ${SVC_NAME} is not installed"
fi
stop
systemctl disable ${SVC_NAME} || failed "failed to disable ${SVC_NAME}"
rm "${UNIT_PATH}" || failed "failed to delete ${UNIT_PATH}"
if [ -f "${CONFIG_PATH}" ]; then
rm "${CONFIG_PATH}" || failed "failed to delete ${CONFIG_PATH}"
fi
systemctl daemon-reload || failed "failed to reload daemons"
}
function service_exists() {
if [ -f "${UNIT_PATH}" ]; then
return 0
else
return 1
fi
}
function status()
{
if service_exists; then
if [ -f "${UNIT_PATH}" ]; then
echo
echo "${UNIT_PATH}"
else


@@ -18,8 +18,6 @@ downloadrunnerversion=_DOWNLOAD_RUNNER_VERSION_
logfile="_UPDATE_LOG_"
restartinteractiverunner=_RESTART_INTERACTIVE_RUNNER_
telemetryfile="$rootfolder/_diag/.telemetry"
# log user who run the script
date "+[%F %T-%4N] --------whoami--------" >> "$logfile" 2>&1
whoami >> "$logfile" 2>&1
@@ -120,58 +118,6 @@ then
exit 1
fi
# fix upgrade issue with macOS when running as a service
attemptedtargetedfix=0
currentplatform=$(uname | awk '{print tolower($0)}')
if [[ "$currentplatform" == 'darwin' && restartinteractiverunner -eq 0 ]]; then
# We needed a fix for https://github.com/actions/runner/issues/743
# We will recreate the ./externals/node12/bin/node of the past runner version that launched the runnerlistener service
# Otherwise mac gatekeeper kills the processes we spawn on creation as we are running a process with no backing file
# We need the pid for the nodejs loop, get that here, its the parent of the runner C# pid
# assumption here is only one process is invoking rootfolder/runsvc.sh
procgroup=$(ps x -o pgid,command | grep "$rootfolder/runsvc.sh" | grep -v grep | awk '{print $1}')
if [[ $? -eq 0 && -n "$procgroup" ]]
then
# inspect the open file handles to find the node process
# we can't actually inspect the process using ps because it uses relative paths and doesn't follow symlinks
path=$(lsof -a -g "$procgroup" -F n | grep node12/bin/node | grep externals | tail -1 | cut -c2-)
if [[ $? -eq 0 && -n "$path" ]]
then
# trim the last 5 characters of the path '/node'
trimmedpath=$(dirname "$path")
if [[ $? -eq 0 && -n "$trimmedpath" ]]
then
attemptedtargetedfix=1
# Create the path if it does not exist
if [[ ! -e "$path" ]]
then
date "+[%F %T-%4N] Creating fallback node at path $path" >> "$logfile" 2>&1
mkdir -p "$trimmedpath"
cp "$rootfolder/externals/node12/bin/node" "$path"
else
date "+[%F %T-%4N] Path for fallback node exists, skipping creating $path" >> "$logfile" 2>&1
fi
else
date "+[%F %T-%4N] DarwinRunnerUpgrade: Failed to trim runner path. TrimmedPath: $trimmedpath, path: $path, pgid: $procgroup, root: $rootfolder" >> "$logfile" 2>&1
date "+[%F %T-%4N] DarwinRunnerUpgrade: Failed to trim runner path. TrimmedPath: $trimmedpath, path: $path, pgid: $procgroup, root: $rootfolder" >> "$telemetryfile" 2>&1
fi
else
date "+[%F %T-%4N] DarwinRunnerUpgrade: Failed to find runner path. Path: $path, pgid: $procgroup, root: $rootfolder" >> "$logfile" 2>&1
date "+[%F %T-%4N] DarwinRunnerUpgrade: Failed to find runner path. Path: $path, pgid: $procgroup, root: $rootfolder" >> "$telemetryfile" 2>&1
fi
else
runproc=$(ps x -o pgid,command | grep "run.sh" | grep -v grep | awk '{print $1}')
if [[ $? -eq 0 && -n "$runproc" ]]
then
date "+[%F %T-%4N] Running as ephemeral using run.sh, no need to recreate node folder" >> "$logfile" 2>&1
else
date "+[%F %T-%4N] DarwinRunnerUpgrade: Failed to find runner pgid. pgid: $procgroup, root: $rootfolder" >> "$logfile" 2>&1
date "+[%F %T-%4N] DarwinRunnerUpgrade: Failed to find runner pgid. pgid: $procgroup, root: $rootfolder" >> "$telemetryfile" 2>&1
fi
fi
fi
date "+[%F %T-%4N] Update succeed" >> "$logfile"
# rename the update log file with %logfile%.succeed/.failed/succeedneedrestart


@@ -43,21 +43,6 @@ else
else
sleep 5
fi
elif [[ $returnCode == 4 ]]; then
if [ ! -x "$(command -v sleep)" ]; then
if [ ! -x "$(command -v ping)" ]; then
COUNT="0"
while [[ $COUNT != 5000 ]]; do
echo "SLEEP" > /dev/null
COUNT=$[$COUNT+1]
done
else
ping -c 5 127.0.0.1 > /dev/null
fi
else
sleep 5
fi
"$DIR"/bin/Runner.Listener run $*
else
exit $returnCode
fi

View File

@@ -33,9 +33,6 @@ namespace GitHub.Runner.Common
[DataMember(EmitDefaultValue = false)]
public string PoolName { get; set; }
[DataMember(EmitDefaultValue = false)]
public bool Ephemeral { get; set; }
[DataMember(EmitDefaultValue = false)]
public string ServerUrl { get; set; }

View File

@@ -26,7 +26,6 @@ namespace GitHub.Runner.Common
Certificates,
Options,
SetupInfo,
Telemetry
}
public static class Constants
@@ -42,8 +41,6 @@ namespace GitHub.Runner.Common
public static string PluginTracePrefix = "##[plugin.trace]";
public static readonly int RunnerDownloadRetryMaxAttempts = 3;
public static readonly int CompositeActionsMaxDepth = 9;
// This enum is embedded within the Constants class to make it easier to reference and avoid
// ambiguous type reference with System.Runtime.InteropServices.OSPlatform and System.Runtime.InteropServices.Architecture
public enum OSPlatform
@@ -126,10 +123,9 @@ namespace GitHub.Runner.Common
{
public static readonly string Check = "check";
public static readonly string Commit = "commit";
public static readonly string Ephemeral = "ephemeral";
public static readonly string Help = "help";
public static readonly string Replace = "replace";
public static readonly string Once = "once"; // Keep this around since customers still rely on it
public static readonly string Once = "once";
public static readonly string RunAsService = "runasservice";
public static readonly string Unattended = "unattended";
public static readonly string Version = "version";
@@ -155,7 +151,6 @@ namespace GitHub.Runner.Common
public static readonly string LowDiskSpace = "LOW_DISK_SPACE";
public static readonly string UnsupportedCommand = "UNSUPPORTED_COMMAND";
public static readonly string UnsupportedCommandMessageDisabled = "The `{0}` command is disabled. Please upgrade to using Environment Files or opt into unsecure command execution by setting the `ACTIONS_ALLOW_UNSECURE_COMMANDS` environment variable to `true`. For more information see: https://github.blog/changelog/2020-10-01-github-actions-deprecating-set-env-and-add-path-commands/";
public static readonly string UnsupportedStopCommandTokenDisabled = "You cannot use a endToken that is an empty string, the string 'pause-logging', or another workflow command. For more information see: https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#example-stopping-and-starting-workflow-commands or opt into insecure command execution by setting the `ACTIONS_ALLOW_UNSECURE_STOPCOMMAND_TOKENS` environment variable to `true`.";
}
public static class RunnerEvent
@@ -215,7 +210,6 @@ namespace GitHub.Runner.Common
// Keep alphabetical
//
public static readonly string AllowUnsupportedCommands = "ACTIONS_ALLOW_UNSECURE_COMMANDS";
public static readonly string AllowUnsupportedStopCommandTokens = "ACTIONS_ALLOW_UNSECURE_STOPCOMMAND_TOKENS";
public static readonly string RunnerDebug = "ACTIONS_RUNNER_DEBUG";
public static readonly string StepDebug = "ACTIONS_STEP_DEBUG";
}

View File

@@ -51,7 +51,6 @@ namespace GitHub.Runner.Common
Add<T>(extensions, "GitHub.Runner.Worker.RemoveMatcherCommandExtension, Runner.Worker");
Add<T>(extensions, "GitHub.Runner.Worker.WarningCommandExtension, Runner.Worker");
Add<T>(extensions, "GitHub.Runner.Worker.ErrorCommandExtension, Runner.Worker");
Add<T>(extensions, "GitHub.Runner.Worker.NoticeCommandExtension, Runner.Worker");
Add<T>(extensions, "GitHub.Runner.Worker.DebugCommandExtension, Runner.Worker");
Add<T>(extensions, "GitHub.Runner.Worker.GroupCommandExtension, Runner.Worker");
Add<T>(extensions, "GitHub.Runner.Worker.EndGroupCommandExtension, Runner.Worker");

View File

@@ -90,8 +90,6 @@ namespace GitHub.Runner.Common
this.SecretMasker.AddValueEncoder(ValueEncoders.UriDataEscape);
this.SecretMasker.AddValueEncoder(ValueEncoders.XmlDataEscape);
this.SecretMasker.AddValueEncoder(ValueEncoders.TrimDoubleQuotes);
this.SecretMasker.AddValueEncoder(ValueEncoders.PowerShellPreAmpersandEscape);
this.SecretMasker.AddValueEncoder(ValueEncoders.PowerShellPostAmpersandEscape);
// Create the trace manager.
if (string.IsNullOrEmpty(logFile))
@@ -200,17 +198,9 @@ namespace GitHub.Runner.Common
if (credData != null &&
credData.Data.TryGetValue("clientId", out var clientId))
{
_userAgents.Add(new ProductInfoHeaderValue("ClientId", clientId));
_userAgents.Add(new ProductInfoHeaderValue($"RunnerId", clientId));
}
}
var runnerFile = GetConfigFile(WellKnownConfigFile.Runner);
if (File.Exists(runnerFile))
{
var runnerSettings = IOUtil.LoadObject<RunnerSettings>(runnerFile);
_userAgents.Add(new ProductInfoHeaderValue("RunnerId", runnerSettings.AgentId.ToString(CultureInfo.InvariantCulture)));
_userAgents.Add(new ProductInfoHeaderValue("GroupId", runnerSettings.PoolId.ToString(CultureInfo.InvariantCulture)));
}
}
public string GetDirectory(WellKnownDirectory directory)
@@ -350,12 +340,6 @@ namespace GitHub.Runner.Common
GetDirectory(WellKnownDirectory.Root),
".setup_info");
break;
case WellKnownConfigFile.Telemetry:
path = Path.Combine(
GetDirectory(WellKnownDirectory.Diag),
".telemetry");
break;
default:
throw new NotSupportedException($"Unexpected well known config file: '{configFile}'");
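
A note on the first HostContext hunk above: SecretMasker value encoders exist so that a registered secret is also redacted in its transformed forms (URI-escaped, XML-escaped, PowerShell ampersand-escaped, and so on); dropping the PowerShell encoders narrows which renderings get masked. The sketch below is a hypothetical, self-contained illustration of that idea only, not the runner's ISecretMasker API.

```csharp
using System;
using System.Collections.Generic;

// Toy masker: every encoder contributes an alternate rendering of each secret,
// so transformed copies of the value are redacted as well.
class MiniSecretMasker
{
    private readonly List<Func<string, string>> _encoders = new List<Func<string, string>>();
    private readonly List<string> _values = new List<string>();

    public void AddValueEncoder(Func<string, string> encoder) => _encoders.Add(encoder);

    public void AddValue(string value)
    {
        _values.Add(value);
        foreach (var encode in _encoders)
        {
            _values.Add(encode(value)); // also mask the encoded form
        }
    }

    public string Mask(string input)
    {
        foreach (var value in _values)
        {
            if (!string.IsNullOrEmpty(value))
            {
                input = input.Replace(value, "***");
            }
        }
        return input;
    }
}
```

For example, after `masker.AddValueEncoder(Uri.EscapeDataString)` and `masker.AddValue("p@ss word")`, a log line containing `p%40ss%20word` is redacted as well.
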

View File

@@ -2,11 +2,8 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Net.Http;
using System.Threading;
using System.Threading.Tasks;
using GitHub.Runner.Sdk;
using GitHub.Services.Common;
using GitHub.Services.WebApi;
namespace GitHub.Runner.Common
@@ -38,11 +35,7 @@ namespace GitHub.Runner.Common
public async Task ConnectAsync(VssConnection jobConnection)
{
_connection = jobConnection;
int totalAttempts = 5;
int attemptCount = totalAttempts;
var configurationStore = HostContext.GetService<IConfigurationStore>();
var runnerSettings = configurationStore.GetSettings();
int attemptCount = 5;
while (!_connection.HasAuthenticated && attemptCount-- > 0)
{
try
@@ -52,71 +45,17 @@ namespace GitHub.Runner.Common
}
catch (Exception ex) when (attemptCount > 0)
{
Trace.Info($"Catch exception during connect. {attemptCount} attempts left.");
Trace.Info($"Catch exception during connect. {attemptCount} attemp left.");
Trace.Error(ex);
if (runnerSettings.IsHostedServer)
{
await CheckNetworkEndpointsAsync(attemptCount);
}
}
int attempt = totalAttempts - attemptCount;
TimeSpan backoff = BackoffTimerHelper.GetExponentialBackoff(attempt, TimeSpan.FromMilliseconds(100), TimeSpan.FromSeconds(3.2), TimeSpan.FromMilliseconds(100));
await Task.Delay(backoff);
await Task.Delay(100);
}
_taskClient = _connection.GetClient<TaskHttpClient>();
_hasConnection = true;
}
private async Task CheckNetworkEndpointsAsync(int attemptsLeft)
{
try
{
Trace.Info("Requesting Actions Service health endpoint status");
using (var httpClientHandler = HostContext.CreateHttpClientHandler())
using (var actionsClient = new HttpClient(httpClientHandler))
{
var baseUri = new Uri(_connection.Uri.GetLeftPart(UriPartial.Authority));
actionsClient.DefaultRequestHeaders.UserAgent.AddRange(HostContext.UserAgents);
// Call the _apis/health endpoint, and include how many attempts are left as a URL query for easy tracking
var response = await actionsClient.GetAsync(new Uri(baseUri, $"_apis/health?_internalRunnerAttemptsLeft={attemptsLeft}"));
Trace.Info($"Actions health status code: {response.StatusCode}");
}
}
catch (Exception ex)
{
// Log error, but continue as this call is best-effort
Trace.Info($"Actions Service health endpoint failed due to {ex.GetType().Name}");
Trace.Error(ex);
}
try
{
Trace.Info("Requesting Github API endpoint status");
// This is a dotcom public API... just call it directly
using (var httpClientHandler = HostContext.CreateHttpClientHandler())
using (var gitHubClient = new HttpClient(httpClientHandler))
{
gitHubClient.DefaultRequestHeaders.UserAgent.AddRange(HostContext.UserAgents);
// Call the api.github.com endpoint, and include how many attempts are left as a URL query for easy tracking
var response = await gitHubClient.GetAsync($"https://api.github.com?_internalRunnerAttemptsLeft={attemptsLeft}");
Trace.Info($"api.github.com status code: {response.StatusCode}");
}
}
catch (Exception ex)
{
// Log error, but continue as this call is best-effort
Trace.Info($"Github API endpoint failed due to {ex.GetType().Name}");
Trace.Error(ex);
}
}
private void CheckConnection()
{
if (!_hasConnection)
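
The ConnectAsync hunk above swaps an exponentially growing delay between connection attempts (BackoffTimerHelper.GetExponentialBackoff, bounded between 100 ms and 3.2 s) for a flat 100 ms delay. Below is a minimal retry sketch in the same shape; the backoff formula is illustrative and assumed, not the helper's exact implementation.

```csharp
using System;
using System.Threading.Tasks;

static class ConnectRetrySketch
{
    private static readonly Random _random = new Random();

    // Assumed shape: minimum delay plus 2^attempt growth and a little jitter, capped at max.
    private static TimeSpan GetExponentialBackoff(int attempt, TimeSpan min, TimeSpan max, TimeSpan delta)
    {
        double growth = Math.Pow(2, attempt) * delta.TotalMilliseconds;
        double jitter = _random.NextDouble() * delta.TotalMilliseconds;
        double millis = Math.Min(min.TotalMilliseconds + growth + jitter, max.TotalMilliseconds);
        return TimeSpan.FromMilliseconds(millis);
    }

    public static async Task ConnectWithRetryAsync(Func<Task> connect, int totalAttempts = 5)
    {
        for (int attempt = 1; attempt <= totalAttempts; attempt++)
        {
            try
            {
                await connect();
                return;
            }
            catch (Exception) when (attempt < totalAttempts)
            {
                // Back off before the next attempt instead of hammering the service.
                TimeSpan backoff = GetExponentialBackoff(
                    attempt,
                    TimeSpan.FromMilliseconds(100),
                    TimeSpan.FromSeconds(3.2),
                    TimeSpan.FromMilliseconds(100));
                await Task.Delay(backoff);
            }
        }
    }
}
```
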

View File

@@ -15,7 +15,6 @@ namespace GitHub.Runner.Common
[ServiceLocator(Default = typeof(JobServerQueue))]
public interface IJobServerQueue : IRunnerService, IThrottlingReporter
{
TaskCompletionSource<int> JobRecordUpdated { get; }
event EventHandler<ThrottlingEventArgs> JobServerQueueThrottling;
Task ShutdownAsync();
void Start(Pipelines.AgentJobRequestMessage jobRequest);
@@ -63,11 +62,8 @@ namespace GitHub.Runner.Common
private IJobServer _jobServer;
private Task[] _allDequeueTasks;
private readonly TaskCompletionSource<int> _jobCompletionSource = new TaskCompletionSource<int>();
private readonly TaskCompletionSource<int> _jobRecordUpdated = new TaskCompletionSource<int>();
private bool _queueInProcess = false;
public TaskCompletionSource<int> JobRecordUpdated => _jobRecordUpdated;
public event EventHandler<ThrottlingEventArgs> JobServerQueueThrottling;
// Web console dequeue will start with process queue every 250ms for the first 60*4 times (~60 seconds).
@@ -291,11 +287,11 @@ namespace GitHub.Runner.Common
{
await _jobServer.AppendTimelineRecordFeedAsync(_scopeIdentifier, _hubName, _planId, _jobTimelineId, _jobTimelineRecordId, stepRecordId, batch.Select(logLine => logLine.Line).ToList(), batch[0].LineNumber.Value, default(CancellationToken));
}
else
else
{
await _jobServer.AppendTimelineRecordFeedAsync(_scopeIdentifier, _hubName, _planId, _jobTimelineId, _jobTimelineRecordId, stepRecordId, batch.Select(logLine => logLine.Line).ToList(), default(CancellationToken));
}
if (_firstConsoleOutputs)
{
HostContext.WritePerfCounter($"WorkerJobServerQueueAppendFirstConsoleOutput_{_planId.ToString()}");
@@ -459,14 +455,6 @@ namespace GitHub.Runner.Common
{
Trace.Verbose("Cleanup buffered timeline record for timeline: {0}.", update.TimelineId);
}
if (!_jobRecordUpdated.Task.IsCompleted &&
update.PendingRecords.Any(x => x.Id == _jobTimelineRecordId && x.State != null))
{
// We have changed the state of the job
Trace.Info("Job timeline record has been updated for the first time.");
_jobRecordUpdated.TrySetResult(0);
}
}
catch (Exception ex)
{
@@ -556,11 +544,6 @@ namespace GitHub.Runner.Common
timelineRecord.WarningCount = rec.WarningCount;
}
if (rec.NoticeCount != null && rec.NoticeCount > 0)
{
timelineRecord.NoticeCount = rec.NoticeCount;
}
if (rec.Issues.Count > 0)
{
timelineRecord.Issues.Clear();
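
The JobServerQueue hunks above remove JobRecordUpdated, a one-shot TaskCompletionSource that is completed the first time a timeline update shows the job record with a state, letting other components await confirmation that the server has acknowledged the job. A minimal sketch of that signaling pattern, with hypothetical names:

```csharp
using System.Threading.Tasks;

class JobRecordSignal
{
    private readonly TaskCompletionSource<int> _jobRecordUpdated =
        new TaskCompletionSource<int>(TaskCreationOptions.RunContinuationsAsynchronously);

    // Awaiters observe that the job timeline record has been updated at least once.
    public Task JobRecordUpdated => _jobRecordUpdated.Task;

    // Called from the timeline-update loop; only the first effective call completes the task.
    public void OnTimelineUpdated(bool jobRecordHasState)
    {
        if (jobRecordHasState && !_jobRecordUpdated.Task.IsCompleted)
        {
            _jobRecordUpdated.TrySetResult(0);
        }
    }
}
```
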

View File

@@ -3,7 +3,6 @@ using System.IO;
using System.IO.Pipes;
using System.Threading;
using System.Threading.Tasks;
using GitHub.Runner.Sdk;
namespace GitHub.Runner.Common
{
@@ -69,7 +68,6 @@ namespace GitHub.Runner.Common
public async Task SendAsync(MessageType messageType, string body, CancellationToken cancellationToken)
{
Trace.Info($"Sending message of length {body.Length}, with hash '{IOUtil.GetSha256Hash(body)}'");
await _writeStream.WriteInt32Async((int)messageType, cancellationToken);
await _writeStream.WriteStringAsync(body, cancellationToken);
}
@@ -79,7 +77,6 @@ namespace GitHub.Runner.Common
WorkerMessage result = new WorkerMessage(MessageType.NotInitialized, string.Empty);
result.MessageType = (MessageType)await _readStream.ReadInt32Async(cancellationToken);
result.Body = await _readStream.ReadStringAsync(cancellationToken);
Trace.Info($"Receiving message of length {result.Body.Length}, with hash '{IOUtil.GetSha256Hash(result.Body)}'");
return result;
}

View File

@@ -5,6 +5,7 @@
<OutputType>Library</OutputType>
<RuntimeIdentifiers>win-x64;win-x86;linux-x64;linux-arm64;linux-arm;osx-x64</RuntimeIdentifiers>
<TargetLatestRuntimePatch>true</TargetLatestRuntimePatch>
<AssetTargetFallback>portable-net45+win8</AssetTargetFallback>
<NoWarn>NU1701;NU1603</NoWarn>
<Version>$(Version)</Version>
<TieredCompilationQuickJit>true</TieredCompilationQuickJit>

View File

@@ -29,10 +29,8 @@ namespace GitHub.Runner.Common
// Configuration
Task<TaskAgent> AddAgentAsync(Int32 agentPoolId, TaskAgent agent);
Task DeleteAgentAsync(int agentPoolId, int agentId);
Task DeleteAgentAsync(int agentId);
Task<List<TaskAgentPool>> GetAgentPoolsAsync(string agentPoolName = null, TaskAgentPoolType poolType = TaskAgentPoolType.Automation);
Task<List<TaskAgent>> GetAgentsAsync(int agentPoolId, string agentName = null);
Task<List<TaskAgent>> GetAgentsAsync(string agentName);
Task<TaskAgent> ReplaceAgentAsync(int agentPoolId, TaskAgent agent);
// messagequeue
@@ -51,7 +49,7 @@ namespace GitHub.Runner.Common
Task<PackageMetadata> GetPackageAsync(string packageType, string platform, string version, bool includeToken, CancellationToken cancellationToken);
// agent update
Task<TaskAgent> UpdateAgentUpdateStateAsync(int agentPoolId, int agentId, string currentState, string trace);
Task<TaskAgent> UpdateAgentUpdateStateAsync(int agentPoolId, int agentId, string currentState);
}
public sealed class RunnerServer : RunnerService, IRunnerServer
@@ -254,11 +252,6 @@ namespace GitHub.Runner.Common
return _genericTaskAgentClient.GetAgentsAsync(agentPoolId, agentName, false);
}
public Task<List<TaskAgent>> GetAgentsAsync(string agentName)
{
return GetAgentsAsync(0, agentName); // search in all agentPools
}
public Task<TaskAgent> ReplaceAgentAsync(int agentPoolId, TaskAgent agent)
{
CheckConnection(RunnerConnectionType.Generic);
@@ -271,11 +264,6 @@ namespace GitHub.Runner.Common
return _genericTaskAgentClient.DeleteAgentAsync(agentPoolId, agentId);
}
public Task DeleteAgentAsync(int agentId)
{
return DeleteAgentAsync(0, agentId); // agentPool is ignored server side
}
//-----------------------------------------------------------------
// MessageQueue
//-----------------------------------------------------------------
@@ -341,10 +329,25 @@ namespace GitHub.Runner.Common
return _genericTaskAgentClient.GetPackageAsync(packageType, platform, version, includeToken, cancellationToken: cancellationToken);
}
public Task<TaskAgent> UpdateAgentUpdateStateAsync(int agentPoolId, int agentId, string currentState, string trace)
public Task<TaskAgent> UpdateAgentUpdateStateAsync(int agentPoolId, int agentId, string currentState)
{
CheckConnection(RunnerConnectionType.Generic);
return _genericTaskAgentClient.UpdateAgentUpdateStateAsync(agentPoolId, agentId, currentState, trace);
return _genericTaskAgentClient.UpdateAgentUpdateStateAsync(agentPoolId, agentId, currentState);
}
//-----------------------------------------------------------------
// Runner Auth Url
//-----------------------------------------------------------------
public Task<string> GetRunnerAuthUrlAsync(int runnerPoolId, int runnerId)
{
CheckConnection(RunnerConnectionType.MessageQueue);
return _messageTaskAgentClient.GetAgentAuthUrlAsync(runnerPoolId, runnerId);
}
public Task ReportRunnerAuthUrlErrorAsync(int runnerPoolId, int runnerId, string error)
{
CheckConnection(RunnerConnectionType.MessageQueue);
return _messageTaskAgentClient.ReportAgentAuthUrlMigrationErrorAsync(runnerPoolId, runnerId, error);
}
}
}

View File

@@ -164,8 +164,9 @@ namespace GitHub.Runner.Common
if (!Silent)
{
Console.WriteLine();
Console.ResetColor();
Console.ForegroundColor = ConsoleColor.White;
Console.WriteLine($"# {message}");
Console.ResetColor();
Console.WriteLine();
}
}
@@ -176,8 +177,9 @@ namespace GitHub.Runner.Common
{
Console.ForegroundColor = ConsoleColor.Green;
Console.Write("√ ");
Console.ResetColor();
Console.ForegroundColor = ConsoleColor.White;
Console.WriteLine(message);
Console.ResetColor();
}
}

View File

@@ -29,11 +29,10 @@ namespace GitHub.Runner.Listener
{
Constants.Runner.CommandLine.Flags.Check,
Constants.Runner.CommandLine.Flags.Commit,
Constants.Runner.CommandLine.Flags.Ephemeral,
Constants.Runner.CommandLine.Flags.Help,
Constants.Runner.CommandLine.Flags.Once,
Constants.Runner.CommandLine.Flags.Replace,
Constants.Runner.CommandLine.Flags.RunAsService,
Constants.Runner.CommandLine.Flags.Once,
Constants.Runner.CommandLine.Flags.Unattended,
Constants.Runner.CommandLine.Flags.Version
};
@@ -67,9 +66,7 @@ namespace GitHub.Runner.Listener
public bool Help => TestFlag(Constants.Runner.CommandLine.Flags.Help);
public bool Unattended => TestFlag(Constants.Runner.CommandLine.Flags.Unattended);
public bool Version => TestFlag(Constants.Runner.CommandLine.Flags.Version);
public bool Ephemeral => TestFlag(Constants.Runner.CommandLine.Flags.Ephemeral);
// Keep this around since customers still rely on it
public bool RunOnce => TestFlag(Constants.Runner.CommandLine.Flags.Once);
// Constructor.

View File

@@ -22,7 +22,6 @@ namespace GitHub.Runner.Listener.Configuration
bool IsConfigured();
Task ConfigureAsync(CommandSettings command);
Task UnconfigureAsync(CommandSettings command);
void DeleteLocalRunnerConfig();
RunnerSettings LoadSettings();
}
@@ -66,18 +65,18 @@ namespace GitHub.Runner.Listener.Configuration
public async Task ConfigureAsync(CommandSettings command)
{
_term.WriteLine();
_term.WriteLine("--------------------------------------------------------------------------------");
_term.WriteLine("| ____ _ _ _ _ _ _ _ _ |");
_term.WriteLine("| / ___(_) |_| | | |_ _| |__ / \\ ___| |_(_) ___ _ __ ___ |");
_term.WriteLine("| | | _| | __| |_| | | | | '_ \\ / _ \\ / __| __| |/ _ \\| '_ \\/ __| |");
_term.WriteLine("| | |_| | | |_| _ | |_| | |_) | / ___ \\ (__| |_| | (_) | | | \\__ \\ |");
_term.WriteLine("| \\____|_|\\__|_| |_|\\__,_|_.__/ /_/ \\_\\___|\\__|_|\\___/|_| |_|___/ |");
_term.WriteLine("| |");
_term.Write("| ");
_term.WriteLine("--------------------------------------------------------------------------------", ConsoleColor.White);
_term.WriteLine("| ____ _ _ _ _ _ _ _ _ |", ConsoleColor.White);
_term.WriteLine("| / ___(_) |_| | | |_ _| |__ / \\ ___| |_(_) ___ _ __ ___ |", ConsoleColor.White);
_term.WriteLine("| | | _| | __| |_| | | | | '_ \\ / _ \\ / __| __| |/ _ \\| '_ \\/ __| |", ConsoleColor.White);
_term.WriteLine("| | |_| | | |_| _ | |_| | |_) | / ___ \\ (__| |_| | (_) | | | \\__ \\ |", ConsoleColor.White);
_term.WriteLine("| \\____|_|\\__|_| |_|\\__,_|_.__/ /_/ \\_\\___|\\__|_|\\___/|_| |_|___/ |", ConsoleColor.White);
_term.WriteLine("| |", ConsoleColor.White);
_term.Write("| ", ConsoleColor.White);
_term.Write("Self-hosted runner registration", ConsoleColor.Cyan);
_term.WriteLine(" |");
_term.WriteLine("| |");
_term.WriteLine("--------------------------------------------------------------------------------");
_term.WriteLine(" |", ConsoleColor.White);
_term.WriteLine("| |", ConsoleColor.White);
_term.WriteLine("--------------------------------------------------------------------------------", ConsoleColor.White);
Trace.Info(nameof(ConfigureAsync));
if (IsConfigured())
@@ -118,7 +117,6 @@ namespace GitHub.Runner.Listener.Configuration
try
{
// Determine the service deployment type based on connection data. (Hosted/OnPremises)
// Hosted usually means github.com or localhost, while OnPremises means GHES or GHAE
runnerSettings.IsHostedServer = runnerSettings.GitHubUrl == null || UrlUtil.IsHostedServer(new UriBuilder(runnerSettings.GitHubUrl));
// Warn if the Actions server url and GHES server url has different Host
@@ -167,7 +165,7 @@ namespace GitHub.Runner.Listener.Configuration
List<TaskAgentPool> agentPools = await _runnerServer.GetAgentPoolsAsync();
TaskAgentPool defaultPool = agentPools?.Where(x => x.IsInternal).FirstOrDefault();
if (agentPools?.Where(x => !x.IsHosted).Count() > 0)
if (agentPools?.Where(x => !x.IsHosted).Count() > 1)
{
poolName = command.GetRunnerGroupName(defaultPool?.Name);
_term.WriteLine();
@@ -188,7 +186,7 @@ namespace GitHub.Runner.Listener.Configuration
}
else
{
Trace.Info($"Found a self-hosted runner group with id {agentPool.Id} and name {agentPool.Name}");
Trace.Info("Found a self-hosted runner group with id {1} and name {2}", agentPool.Id, agentPool.Name);
runnerSettings.PoolId = agentPool.Id;
runnerSettings.PoolName = agentPool.Name;
}
@@ -196,7 +194,6 @@ namespace GitHub.Runner.Listener.Configuration
TaskAgent agent;
while (true)
{
runnerSettings.Ephemeral = command.Ephemeral;
runnerSettings.AgentName = command.GetRunnerName();
_term.WriteLine();
@@ -213,7 +210,7 @@ namespace GitHub.Runner.Listener.Configuration
if (command.GetReplace())
{
// Update existing agent with new PublicKey, agent version.
agent = UpdateExistingAgent(agent, publicKey, userLabels, runnerSettings.Ephemeral);
agent = UpdateExistingAgent(agent, publicKey, userLabels);
try
{
@@ -236,7 +233,7 @@ namespace GitHub.Runner.Listener.Configuration
else
{
// Create a new agent.
agent = CreateNewAgent(runnerSettings.AgentName, publicKey, userLabels, runnerSettings.Ephemeral);
agent = CreateNewAgent(runnerSettings.AgentName, publicKey, userLabels);
try
{
@@ -330,38 +327,6 @@ namespace GitHub.Runner.Listener.Configuration
#endif
}
// Delete .runner and .credentials files
public void DeleteLocalRunnerConfig()
{
bool isConfigured = _store.IsConfigured();
bool hasCredentials = _store.HasCredentials();
//delete credential config files
var currentAction = "Removing .credentials";
if (hasCredentials)
{
_store.DeleteCredential();
var keyManager = HostContext.GetService<IRSAKeyManager>();
keyManager.DeleteKey();
_term.WriteSuccessMessage("Removed .credentials");
}
else
{
_term.WriteLine("Does not exist. Skipping " + currentAction);
}
//delete settings config file
currentAction = "Removing .runner";
if (isConfigured)
{
_store.DeleteSettings();
_term.WriteSuccessMessage("Removed .runner");
}
else
{
_term.WriteLine("Does not exist. Skipping " + currentAction);
}
}
public async Task UnconfigureAsync(CommandSettings command)
{
string currentAction = string.Empty;
@@ -381,9 +346,12 @@ namespace GitHub.Runner.Listener.Configuration
_term.WriteLine();
_term.WriteSuccessMessage("Runner service removed");
#else
// unconfig systemd or osx service first
throw new Exception("Uninstall service first");
#elif OS_LINUX
// unconfig system D service first
throw new Exception("Unconfigure service first");
#elif OS_OSX
// unconfig osx service first
throw new Exception("Unconfigure service first");
#endif
}
@@ -415,7 +383,7 @@ namespace GitHub.Runner.Listener.Configuration
// Determine the service deployment type based on connection data. (Hosted/OnPremises)
await _runnerServer.ConnectAsync(new Uri(settings.ServerUrl), creds);
var agents = await _runnerServer.GetAgentsAsync(settings.AgentName);
var agents = await _runnerServer.GetAgentsAsync(settings.PoolId, settings.AgentName);
Trace.Verbose("Returns {0} agents", agents.Count);
TaskAgent agent = agents.FirstOrDefault();
if (agent == null)
@@ -424,7 +392,7 @@ namespace GitHub.Runner.Listener.Configuration
}
else
{
await _runnerServer.DeleteAgentAsync(settings.AgentId);
await _runnerServer.DeleteAgentAsync(settings.PoolId, settings.AgentId);
_term.WriteLine();
_term.WriteSuccessMessage("Runner removed successfully");
@@ -435,7 +403,31 @@ namespace GitHub.Runner.Listener.Configuration
_term.WriteLine("Cannot connect to server, because config files are missing. Skipping removing runner from the server.");
}
DeleteLocalRunnerConfig();
//delete credential config files
currentAction = "Removing .credentials";
if (hasCredentials)
{
_store.DeleteCredential();
var keyManager = HostContext.GetService<IRSAKeyManager>();
keyManager.DeleteKey();
_term.WriteSuccessMessage("Removed .credentials");
}
else
{
_term.WriteLine("Does not exist. Skipping " + currentAction);
}
//delete settings config file
currentAction = "Removing .runner";
if (isConfigured)
{
_store.DeleteSettings();
_term.WriteSuccessMessage("Removed .runner");
}
else
{
_term.WriteLine("Does not exist. Skipping " + currentAction);
}
}
catch (Exception)
{
@@ -466,7 +458,7 @@ namespace GitHub.Runner.Listener.Configuration
}
private TaskAgent UpdateExistingAgent(TaskAgent agent, RSAParameters publicKey, ISet<string> userLabels, bool ephemeral)
private TaskAgent UpdateExistingAgent(TaskAgent agent, RSAParameters publicKey, ISet<string> userLabels)
{
ArgUtil.NotNull(agent, nameof(agent));
agent.Authorization = new TaskAgentAuthorization
@@ -477,8 +469,6 @@ namespace GitHub.Runner.Listener.Configuration
// update should replace the existing labels
agent.Version = BuildConstants.RunnerPackage.Version;
agent.OSDescription = RuntimeInformation.OSDescription;
agent.Ephemeral = ephemeral;
agent.MaxParallelism = 1;
agent.Labels.Clear();
@@ -494,7 +484,7 @@ namespace GitHub.Runner.Listener.Configuration
return agent;
}
private TaskAgent CreateNewAgent(string agentName, RSAParameters publicKey, ISet<string> userLabels, bool ephemeral)
private TaskAgent CreateNewAgent(string agentName, RSAParameters publicKey, ISet<string> userLabels)
{
TaskAgent agent = new TaskAgent(agentName)
{
@@ -505,7 +495,6 @@ namespace GitHub.Runner.Listener.Configuration
MaxParallelism = 1,
Version = BuildConstants.RunnerPackage.Version,
OSDescription = RuntimeInformation.OSDescription,
Ephemeral = ephemeral,
};
agent.Labels.Add(new AgentLabel("self-hosted", LabelType.System));

View File

@@ -87,7 +87,7 @@ namespace GitHub.Runner.Listener.Configuration
public string GetUniqueRunnerGroupName()
{
return RunnerServiceLocalGroupPrefix + IOUtil.GetSha256Hash(HostContext.GetDirectory(WellKnownDirectory.Bin)).Substring(0, 5);
return RunnerServiceLocalGroupPrefix + IOUtil.GetPathHash(HostContext.GetDirectory(WellKnownDirectory.Bin)).Substring(0, 5);
}
public bool LocalGroupExists(string groupName)

View File

@@ -85,7 +85,7 @@ namespace GitHub.Runner.Listener.Configuration
while (true)
{
// Write the message prompt.
_terminal.Write($"{description} ");
_terminal.Write($"{description} ", ConsoleColor.White);
if(!string.IsNullOrEmpty(defaultValue))
{

View File

@@ -27,27 +27,24 @@ namespace GitHub.Runner.Listener
Task ShutdownAsync();
}
// This implementation of IJobDispatcher is not thread safe.
// It is based on the fact that the current design of the runner is a dequeue
// and processes one message from the message queue at a time.
// In addition, it only executes one job every time,
// and the server will not send another job while this one is still running.
// This implementation of IDobDispatcher is not thread safe.
// It is base on the fact that the current design of runner is dequeue
// and process one message from message queue everytime.
// In addition, it only execute one job every time,
// and server will not send another job while this one is still running.
public sealed class JobDispatcher : RunnerService, IJobDispatcher
{
private readonly Lazy<Dictionary<long, TaskResult>> _localRunJobResult = new Lazy<Dictionary<long, TaskResult>>();
private int _poolId;
IConfigurationStore _configurationStore;
RunnerSettings _runnerSettings;
RunnerSettings _runnerSetting;
private static readonly string _workerProcessName = $"Runner.Worker{IOUtil.ExeExtension}";
// this is not thread-safe
private readonly Queue<Guid> _jobDispatchedQueue = new Queue<Guid>();
private readonly ConcurrentDictionary<Guid, WorkerDispatcher> _jobInfos = new ConcurrentDictionary<Guid, WorkerDispatcher>();
// allow up to 30sec for any data to be transmitted over the process channel
// timeout limit can be overwritten by environment GITHUB_ACTIONS_RUNNER_CHANNEL_TIMEOUT
//allow up to 30sec for any data to be transmitted over the process channel
//timeout limit can be overwrite by environment GITHUB_ACTIONS_RUNNER_CHANNEL_TIMEOUT
private TimeSpan _channelTimeout;
private TaskCompletionSource<bool> _runOnceJobCompleted = new TaskCompletionSource<bool>();
@@ -57,9 +54,9 @@ namespace GitHub.Runner.Listener
base.Initialize(hostContext);
// get pool id from config
_configurationStore = hostContext.GetService<IConfigurationStore>();
_runnerSettings = _configurationStore.GetSettings();
_poolId = _runnerSettings.PoolId;
var configurationStore = hostContext.GetService<IConfigurationStore>();
_runnerSetting = configurationStore.GetSettings();
_poolId = _runnerSetting.PoolId;
int channelTimeoutSeconds;
if (!int.TryParse(Environment.GetEnvironmentVariable("GITHUB_ACTIONS_RUNNER_CHANNEL_TIMEOUT") ?? string.Empty, out channelTimeoutSeconds))
@@ -67,7 +64,7 @@ namespace GitHub.Runner.Listener
channelTimeoutSeconds = 30;
}
// _channelTimeout should be in range [30, 300] seconds
// _channelTimeout should in range [30, 300] seconds
_channelTimeout = TimeSpan.FromSeconds(Math.Min(Math.Max(channelTimeoutSeconds, 30), 300));
Trace.Info($"Set runner/worker IPC timeout to {_channelTimeout.TotalSeconds} seconds.");
}
@@ -233,12 +230,10 @@ namespace GitHub.Runner.Listener
return;
}
// based on the current design, server will only send one job for a given runner at a time.
// if the runner received a new job request while a previous job request is still running, this typically indicates two situations
// 1. a runner bug caused a server and runner mismatch on the state of the job request, e.g. the runner didn't renew the jobrequest
// properly but thinks it still owns the job reqest, however the server has already abandoned the jobrequest.
// 2. a server bug or design change that allowed the server to send more than one job request to an given runner that hasn't finished
//    a previous job request.
// base on the current design, server will only send one job for a given runner everytime.
// if the runner received a new job request while a previous job request is still running, this typically indicate two situations
// 1. an runner bug cause server and runner mismatch on the state of the job request, ex. runner not renew jobrequest properly but think it still own the job reqest, however server already abandon the jobrequest.
// 2. a server bug or design change that allow server send more than one job request to an given runner that haven't finish previous job request.
var runnerServer = HostContext.GetService<IRunnerServer>();
TaskAgentJobRequest request = null;
try
@@ -250,7 +245,7 @@ namespace GitHub.Runner.Listener
Trace.Error($"Catch job-not-found exception while checking jobrequest {jobDispatch.JobId} status. Cancel running worker right away.");
Trace.Error(ex);
jobDispatch.WorkerCancellationTokenSource.Cancel();
// make sure worker process exits before we return, otherwise we might leave an orphan worker process behind.
// make sure worker process exit before we return, otherwise we might leave orphan worker process behind.
await jobDispatch.WorkerDispatch;
return;
}
@@ -261,7 +256,7 @@ namespace GitHub.Runner.Listener
Trace.Error(ex);
jobDispatch.WorkerCancellationTokenSource.Cancel();
// make sure the worker process exits before we rethrow, otherwise we might leave orphan worker process behind.
// make sure worker process exit before we rethrow, otherwise we might leave orphan worker process behind.
await jobDispatch.WorkerDispatch;
// rethrow original exception
@@ -270,8 +265,8 @@ namespace GitHub.Runner.Listener
if (request.Result != null)
{
// job request has been finished, the server already has the result.
// this means the runner is busted since it is still running that request.
// job request has been finished, the server already has result.
// this means runner is busted since it still running that request.
// cancel the zombie worker, run next job request.
Trace.Error($"Received job request while previous job {jobDispatch.JobId} still running on worker. Cancel the previous job since the job request have been finished on server side with result: {request.Result.Value}.");
jobDispatch.WorkerCancellationTokenSource.Cancel();
@@ -510,20 +505,7 @@ namespace GitHub.Runner.Listener
{
detailInfo = string.Join(Environment.NewLine, workerOutput);
Trace.Info($"Return code {returnCode} indicate worker encounter an unhandled exception or app crash, attach worker stdout/stderr to JobRequest result.");
var jobServer = HostContext.GetService<IJobServer>();
VssCredentials jobServerCredential = VssUtil.GetVssCredential(systemConnection);
VssConnection jobConnection = VssUtil.CreateConnection(systemConnection.Url, jobServerCredential);
await jobServer.ConnectAsync(jobConnection);
await LogWorkerProcessUnhandledException(jobServer, message, detailInfo);
// Go ahead to finish the job with result 'Failed' if the STDERR from worker is System.IO.IOException, since it typically means we are running out of disk space.
if (detailInfo.Contains(typeof(System.IO.IOException).ToString(), StringComparison.OrdinalIgnoreCase))
{
Trace.Info($"Finish job with result 'Failed' due to IOException.");
await ForceFailJob(jobServer, message);
}
await LogWorkerProcessUnhandledException(message, detailInfo);
}
TaskResult result = TaskResultUtil.TranslateFromReturnCode(returnCode);
@@ -664,15 +646,13 @@ namespace GitHub.Runner.Listener
try
{
request = await runnerServer.RenewAgentRequestAsync(poolId, requestId, lockToken, orchestrationId, token);
Trace.Info($"Successfully renew job request {requestId}, job is valid till {request.LockedUntil.Value}");
if (!firstJobRequestRenewed.Task.IsCompleted)
{
// fire first renew succeed event.
firstJobRequestRenewed.TrySetResult(0);
// Update settings if the runner name has been changed server-side
UpdateAgentNameIfNeeded(request.ReservedAgent?.Name);
}
if (encounteringError > 0)
@@ -772,27 +752,6 @@ namespace GitHub.Runner.Listener
}
}
private void UpdateAgentNameIfNeeded(string agentName)
{
var isNewAgentName = !string.Equals(_runnerSettings.AgentName, agentName, StringComparison.Ordinal);
if (!isNewAgentName || string.IsNullOrEmpty(agentName))
{
return;
}
_runnerSettings.AgentName = agentName;
try
{
_configurationStore.SaveSettings(_runnerSettings);
}
catch (Exception ex)
{
Trace.Error("Cannot update the settings file:");
Trace.Error(ex);
}
}
// Best effort upload any logs for this job.
private async Task TryUploadUnfinishedLogs(Pipelines.AgentJobRequestMessage message)
{
@@ -954,16 +913,53 @@ namespace GitHub.Runner.Listener
}
// log an error issue to job level timeline record
private async Task LogWorkerProcessUnhandledException(IJobServer jobServer, Pipelines.AgentJobRequestMessage message, string errorMessage)
private async Task LogWorkerProcessUnhandledException(Pipelines.AgentJobRequestMessage message, string errorMessage)
{
try
{
var timeline = await jobServer.GetTimelineAsync(message.Plan.ScopeIdentifier, message.Plan.PlanType, message.Plan.PlanId, message.Timeline.Id, CancellationToken.None);
ArgUtil.NotNull(timeline, nameof(timeline));
var systemConnection = message.Resources.Endpoints.SingleOrDefault(x => string.Equals(x.Name, WellKnownServiceEndpointNames.SystemVssConnection));
ArgUtil.NotNull(systemConnection, nameof(systemConnection));
var jobServer = HostContext.GetService<IJobServer>();
VssCredentials jobServerCredential = VssUtil.GetVssCredential(systemConnection);
VssConnection jobConnection = VssUtil.CreateConnection(systemConnection.Url, jobServerCredential);
/* Below is the legacy 'OnPremises' code that is currently unused by the runner
ToDo: re-implement code as appropriate once GHES support is added.
// Make sure SystemConnection Url match Config Url base for OnPremises server
if (!message.Variables.ContainsKey(Constants.Variables.System.ServerType) ||
string.Equals(message.Variables[Constants.Variables.System.ServerType]?.Value, "OnPremises", StringComparison.OrdinalIgnoreCase))
{
try
{
Uri result = null;
Uri configUri = new Uri(_runnerSetting.ServerUrl);
if (Uri.TryCreate(new Uri(configUri.GetComponents(UriComponents.SchemeAndServer, UriFormat.Unescaped)), jobServerUrl.PathAndQuery, out result))
{
//replace the schema and host portion of messageUri with the host from the
//server URI (which was set at config time)
jobServerUrl = result;
}
}
catch (InvalidOperationException ex)
{
//cannot parse the Uri - not a fatal error
Trace.Error(ex);
}
catch (UriFormatException ex)
{
//cannot parse the Uri - not a fatal error
Trace.Error(ex);
}
} */
await jobServer.ConnectAsync(jobConnection);
var timeline = await jobServer.GetTimelineAsync(message.Plan.ScopeIdentifier, message.Plan.PlanType, message.Plan.PlanId, message.Timeline.Id, CancellationToken.None);
ArgUtil.NotNull(timeline, nameof(timeline));
TimelineRecord jobRecord = timeline.Records.FirstOrDefault(x => x.Id == message.JobId && x.RecordType == "Job");
ArgUtil.NotNull(jobRecord, nameof(jobRecord));
var unhandledExceptionIssue = new Issue() { Type = IssueType.Error, Message = errorMessage };
unhandledExceptionIssue.Data[Constants.Runner.InternalTelemetryIssueDataKey] = Constants.Runner.WorkerCrash;
jobRecord.ErrorCount++;
@@ -977,21 +973,6 @@ namespace GitHub.Runner.Listener
}
}
// raise job completed event to fail the job.
private async Task ForceFailJob(IJobServer jobServer, Pipelines.AgentJobRequestMessage message)
{
try
{
var jobCompletedEvent = new JobCompletedEvent(message.RequestId, message.JobId, TaskResult.Failed);
await jobServer.RaisePlanEventAsync<JobCompletedEvent>(message.Plan.ScopeIdentifier, message.Plan.PlanType, message.Plan.PlanId, jobCompletedEvent, CancellationToken.None);
}
catch (Exception ex)
{
Trace.Error("Fail to raise JobCompletedEvent back to service.");
Trace.Error(ex);
}
}
private class WorkerDispatcher : IDisposable
{
public long RequestId { get; }

View File

@@ -5,6 +5,7 @@
<OutputType>Exe</OutputType>
<RuntimeIdentifiers>win-x64;win-x86;linux-x64;linux-arm64;linux-arm;osx-x64</RuntimeIdentifiers>
<TargetLatestRuntimePatch>true</TargetLatestRuntimePatch>
<AssetTargetFallback>portable-net45+win8</AssetTargetFallback>
<NoWarn>NU1701;NU1603</NoWarn>
<Version>$(Version)</Version>
<TieredCompilationQuickJit>true</TieredCompilationQuickJit>

View File

@@ -214,7 +214,7 @@ namespace GitHub.Runner.Listener
var startupTypeAsString = command.GetStartupType();
if (string.IsNullOrEmpty(startupTypeAsString) && configuredAsService)
{
// We need try our best to make the startup type accurate
// We need try our best to make the startup type accurate
// The problem is coming from runner autoupgrade, which result an old version service host binary but a newer version runner binary
// At that time the servicehost won't pass --startuptype to Runner.Listener while the runner is actually running as service.
// We will guess the startup type only when the runner is configured as service and the guess will based on whether STDOUT/STDERR/STDIN been redirect or not
@@ -233,14 +233,8 @@ namespace GitHub.Runner.Listener
Trace.Info($"Set runner startup type - {startType}");
HostContext.StartupType = startType;
if (command.RunOnce)
{
_term.WriteLine("Warning: '--once' is going to be deprecated in the future, please consider using '--ephemeral' during runner registration.", ConsoleColor.Yellow);
_term.WriteLine("https://docs.github.com/en/actions/hosting-your-own-runners/autoscaling-with-self-hosted-runners#using-ephemeral-runners-for-autoscaling", ConsoleColor.Yellow);
}
// Run the runner interactively or as service
return await RunAsync(settings, command.RunOnce || settings.Ephemeral);
return await RunAsync(settings, command.RunOnce);
}
else
{
@@ -312,15 +306,10 @@ namespace GitHub.Runner.Listener
}
HostContext.WritePerfCounter("SessionCreated");
_term.WriteLine($"Current runner version: '{BuildConstants.RunnerPackage.Version}'");
_term.WriteLine($"{DateTime.UtcNow:u}: Listening for Jobs");
IJobDispatcher jobDispatcher = null;
CancellationTokenSource messageQueueLoopTokenSource = CancellationTokenSource.CreateLinkedTokenSource(HostContext.RunnerShutdownToken);
// Should we try to cleanup ephemeral runners
bool runOnceJobCompleted = false;
try
{
var notification = HostContext.GetService<IJobNotification>();
@@ -382,7 +371,6 @@ namespace GitHub.Runner.Listener
Task completeTask = await Task.WhenAny(getNextMessage, jobDispatcher.RunOnceJobCompleted.Task);
if (completeTask == jobDispatcher.RunOnceJobCompleted.Task)
{
runOnceJobCompleted = true;
Trace.Info("Job has finished at backend, the runner will exit since it is running under onetime use mode.");
Trace.Info("Stop message queue looping.");
messageQueueLoopTokenSource.Cancel();
@@ -478,24 +466,10 @@ namespace GitHub.Runner.Listener
await jobDispatcher.ShutdownAsync();
}
try
{
await _listener.DeleteSessionAsync();
}
catch (Exception ex) when (runOnce)
{
// ignore exception during delete session for ephemeral runner since the runner might already be deleted from the server side
// and the delete session call will end up with 401.
Trace.Info($"Ignore any exception during DeleteSession for an ephemeral runner. {ex}");
}
//TODO: make sure we don't mask more important exception
await _listener.DeleteSessionAsync();
messageQueueLoopTokenSource.Dispose();
if (settings.Ephemeral && runOnceJobCompleted)
{
var configManager = HostContext.GetService<IConfigurationManager>();
configManager.DeleteLocalRunnerConfig();
}
}
}
catch (TaskAgentAccessTokenExpiredException)
@@ -538,9 +512,7 @@ Config Options:
--labels string Extra labels in addition to the default: 'self-hosted,{Constants.Runner.Platform},{Constants.Runner.PlatformArchitecture}'
--work string Relative runner work directory (default {Constants.Path.WorkDirectory})
--replace Replace any existing runner with the same name (default false)
--pat GitHub personal access token used for checking network connectivity when executing `.{separator}run.{ext} --check`
--ephemeral Configure the runner to only take one job and then let the service un-configure the runner after the job finishes (default false)");
--pat GitHub personal access token used for checking network connectivity when executing `.{separator}run.{ext} --check`");
#if OS_WINDOWS
_term.WriteLine($@" --runasservice Run the runner as a service");
_term.WriteLine($@" --windowslogonaccount string Account to run the service as. Requires runasservice");

View File

@@ -13,7 +13,6 @@ using GitHub.Services.WebApi;
using GitHub.Services.Common;
using GitHub.Runner.Common;
using GitHub.Runner.Sdk;
using System.Text;
namespace GitHub.Runner.Listener
{
@@ -64,25 +63,23 @@ namespace GitHub.Runner.Listener
// Print console line that warn user not shutdown runner.
await UpdateRunnerUpdateStateAsync("Runner update in progress, do not shutdown runner.");
await UpdateRunnerUpdateStateAsync($"Downloading {_targetPackage.Version} runner", $"RunnerPlatform: {_targetPackage.Platform}");
await UpdateRunnerUpdateStateAsync($"Downloading {_targetPackage.Version} runner");
var downloadTrace = await DownloadLatestRunner(token);
await DownloadLatestRunner(token);
Trace.Info($"Download latest runner and unzip into runner root.");
// wait till all running job finish
await UpdateRunnerUpdateStateAsync("Waiting for current job finish running.", downloadTrace);
await UpdateRunnerUpdateStateAsync("Waiting for current job finish running.");
await jobDispatcher.WaitAsync(token);
Trace.Info($"All running job has exited.");
// We need to keep runner backup around for macOS until we fixed https://github.com/actions/runner/issues/743
// delete runner backup
var stopWatch = Stopwatch.StartNew();
DeletePreviousVersionRunnerBackup(token);
Trace.Info($"Delete old version runner backup.");
stopWatch.Stop();
// generate update script from template
await UpdateRunnerUpdateStateAsync("Generate and execute update script.", $"DeleteRunnerBackupTime: {stopWatch.ElapsedMilliseconds}ms");
await UpdateRunnerUpdateStateAsync("Generate and execute update script.");
string updateScript = GenerateUpdateScript(restartInteractiveRunner);
Trace.Info($"Generate update script into: {updateScript}");
@@ -99,7 +96,7 @@ namespace GitHub.Runner.Listener
invokeScript.Start();
Trace.Info($"Update script start running");
await UpdateRunnerUpdateStateAsync("Runner will exit shortly for update, should be back online within 10 seconds.", $"RestartInteractiveRunner: {restartInteractiveRunner}");
await UpdateRunnerUpdateStateAsync("Runner will exit shortly for update, should back online within 10 seconds.");
return true;
}
@@ -153,10 +150,8 @@ namespace GitHub.Runner.Listener
/// </summary>
/// <param name="token"></param>
/// <returns></returns>
private async Task<string> DownloadLatestRunner(CancellationToken token)
private async Task DownloadLatestRunner(CancellationToken token)
{
var traceStringBuilder = new StringBuilder();
traceStringBuilder.AppendLine($"DownloadUrl: {_targetPackage.DownloadUrl}");
string latestRunnerDirectory = Path.Combine(HostContext.GetDirectory(WellKnownDirectory.Work), Constants.Path.UpdateDirectory);
IOUtil.DeleteDirectory(latestRunnerDirectory, token);
Directory.CreateDirectory(latestRunnerDirectory);
@@ -165,7 +160,6 @@ namespace GitHub.Runner.Listener
string archiveFile = null;
bool downloadSucceeded = false;
var stopWatch = Stopwatch.StartNew();
try
{
// Download the runner, using multiple attempts in order to be resilient against any networking/CDN issues
@@ -216,7 +210,6 @@ namespace GitHub.Runner.Listener
try
{
Trace.Info($"Download runner: begin download");
long downloadSize = 0;
//open zip stream in async mode
using (HttpClient httpClient = new HttpClient(HostContext.CreateHttpClientHandler()))
@@ -235,16 +228,11 @@ namespace GitHub.Runner.Listener
//81920 is the default used by System.IO.Stream.CopyTo and is under the large object heap threshold (85k).
await result.CopyToAsync(fs, 81920, downloadCts.Token);
await fs.FlushAsync(downloadCts.Token);
downloadSize = fs.Length;
}
}
Trace.Info($"Download runner: finished download");
downloadSucceeded = true;
stopWatch.Stop();
traceStringBuilder.AppendLine($"PackageDownloadTime: {stopWatch.ElapsedMilliseconds}ms");
traceStringBuilder.AppendLine($"Attempts: {attempt}");
traceStringBuilder.AppendLine($"PackageSize: {downloadSize / 1024 / 1024}MB");
break;
}
catch (OperationCanceledException) when (token.IsCancellationRequested)
@@ -269,7 +257,6 @@ namespace GitHub.Runner.Listener
throw new TaskCanceledException($"Runner package '{archiveFile}' failed after {Constants.RunnerDownloadRetryMaxAttempts} download attempts");
}
stopWatch.Restart();
// If we got this far, we know that we've successfully downloaded the runner package
// Validate Hash Matches if it is provided
using (FileStream stream = File.OpenRead(archiveFile))
@@ -333,9 +320,7 @@ namespace GitHub.Runner.Listener
throw new NotSupportedException($"{archiveFile}");
}
stopWatch.Stop();
Trace.Info($"Finished getting latest runner package at: {latestRunnerDirectory}.");
traceStringBuilder.AppendLine($"PackageExtractTime: {stopWatch.ElapsedMilliseconds}ms");
}
finally
{
@@ -355,7 +340,6 @@ namespace GitHub.Runner.Listener
}
}
stopWatch.Restart();
// copy latest runner into runner root folder
// copy bin from _work/_update -> bin.version under root
string binVersionDir = Path.Combine(HostContext.GetDirectory(WellKnownDirectory.Root), $"{Constants.Path.BinDirectory}.{_targetPackage.Version}");
@@ -381,11 +365,6 @@ namespace GitHub.Runner.Listener
IOUtil.DeleteFile(destination);
file.CopyTo(destination, true);
}
stopWatch.Stop();
traceStringBuilder.AppendLine($"CopyRunnerToRootTime: {stopWatch.ElapsedMilliseconds}ms");
return traceStringBuilder.ToString();
}
private void DeletePreviousVersionRunnerBackup(CancellationToken token)
@@ -505,18 +484,13 @@ namespace GitHub.Runner.Listener
return updateScript;
}
private async Task UpdateRunnerUpdateStateAsync(string currentState, string trace = "")
private async Task UpdateRunnerUpdateStateAsync(string currentState)
{
_terminal.WriteLine(currentState);
if (!string.IsNullOrEmpty(trace))
{
Trace.Info(trace);
}
try
{
await _runnerServer.UpdateAgentUpdateStateAsync(_poolId, _agentId, currentState, trace);
await _runnerServer.UpdateAgentUpdateStateAsync(_poolId, _agentId, currentState);
}
catch (VssResourceNotFoundException)
{

View File

@@ -5,6 +5,7 @@
<OutputType>Exe</OutputType>
<RuntimeIdentifiers>win-x64;win-x86;linux-x64;linux-arm64;linux-arm;osx-x64</RuntimeIdentifiers>
<TargetLatestRuntimePatch>true</TargetLatestRuntimePatch>
<AssetTargetFallback>portable-net45+win8</AssetTargetFallback>
<NoWarn>NU1701;NU1603</NoWarn>
<Version>$(Version)</Version>
<TieredCompilationQuickJit>true</TieredCompilationQuickJit>

View File

@@ -5,6 +5,7 @@
<OutputType>Library</OutputType>
<RuntimeIdentifiers>win-x64;win-x86;linux-x64;linux-arm64;linux-arm;osx-x64</RuntimeIdentifiers>
<TargetLatestRuntimePatch>true</TargetLatestRuntimePatch>
<AssetTargetFallback>portable-net45+win8</AssetTargetFallback>
<NoWarn>NU1701;NU1603</NoWarn>
<Version>$(Version)</Version>
<TieredCompilationQuickJit>true</TieredCompilationQuickJit>

View File

@@ -264,17 +264,7 @@ namespace GitHub.Runner.Sdk
{
foreach (KeyValuePair<string, string> kvp in environment)
{
#if OS_WINDOWS
string tempKey = String.IsNullOrWhiteSpace(kvp.Key) ? kvp.Key : kvp.Key.Split('\0')[0];
string tempValue = String.IsNullOrWhiteSpace(kvp.Value) ? kvp.Value : kvp.Value.Split('\0')[0];
if(!String.IsNullOrWhiteSpace(tempKey))
{
_proc.StartInfo.Environment[tempKey] = tempValue;
}
#else
_proc.StartInfo.Environment[kvp.Key] = kvp.Value;
#endif
}
}

View File

@@ -5,6 +5,7 @@
<OutputType>Library</OutputType>
<RuntimeIdentifiers>win-x64;win-x86;linux-x64;linux-arm64;linux-arm;osx-x64</RuntimeIdentifiers>
<TargetLatestRuntimePatch>true</TargetLatestRuntimePatch>
<AssetTargetFallback>portable-net45+win8</AssetTargetFallback>
<NoWarn>NU1701;NU1603</NoWarn>
<Version>$(Version)</Version>
<TieredCompilationQuickJit>true</TieredCompilationQuickJit>

View File

@@ -47,7 +47,7 @@ namespace GitHub.Runner.Sdk
return StringUtil.ConvertFromJson<T>(json);
}
public static string GetSha256Hash(string path)
public static string GetPathHash(string path)
{
string hashString = path.ToLowerInvariant();
using (SHA256 sha256hash = SHA256.Create())
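
The hunk above shows only the renamed signature (GetSha256Hash → GetPathHash) and the first lines of the body. Assuming the helper hex-encodes a SHA-256 digest of the lower-cased path (GetUniqueRunnerGroupName earlier takes the first five characters of the result), a sketch of how such a method is typically completed:

```csharp
using System.Security.Cryptography;
using System.Text;

static class PathHashSketch
{
    public static string GetSha256Hash(string path)
    {
        // Normalize case so the same directory always maps to the same short identifier.
        string normalized = path.ToLowerInvariant();
        using (SHA256 sha256 = SHA256.Create())
        {
            byte[] digest = sha256.ComputeHash(Encoding.UTF8.GetBytes(normalized));
            var builder = new StringBuilder(digest.Length * 2);
            foreach (byte b in digest)
            {
                builder.Append(b.ToString("x2")); // lowercase hex
            }
            return builder.ToString();
        }
    }
}
```
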

View File

@@ -115,15 +115,11 @@ namespace GitHub.Runner.Sdk
}
}
#if OS_WINDOWS
trace?.Info($"{command}: command not found. Make sure '{command}' is installed and its location included in the 'Path' environment variable.");
#else
trace?.Info($"{command}: command not found. Make sure '{command}' is installed and its location included in the 'PATH' environment variable.");
#endif
trace?.Info("Not found.");
if (require)
{
throw new FileNotFoundException(
message: $"{command}: command not found",
message: $"File not found: '{command}'",
fileName: command);
}

View File

@@ -1,5 +1,7 @@
using GitHub.DistributedTask.Pipelines.ContextData;
using GitHub.DistributedTask.Pipelines;
using GitHub.DistributedTask.Pipelines.ContextData;
using GitHub.DistributedTask.WebApi;
using GitHub.Runner.Common.Util;
using GitHub.Runner.Worker.Container;
using System;
using System.Collections.Generic;
@@ -73,19 +75,11 @@ namespace GitHub.Runner.Worker
return false;
}
if (!ActionCommandManager.EnhancedAnnotationsEnabled(context) && actionCommand.Command == "notice")
{
context.Debug($"Enhanced Annotations not enabled on the server: 'notice' command will not be processed.");
return false;
}
// Serialize order
// process action command in serialize order.
lock (_commandSerializeLock)
{
// Currently stopped
if (_stopProcessCommand)
{
// Resume token
if (!string.IsNullOrEmpty(_stopToken) &&
string.Equals(actionCommand.Command, _stopToken, StringComparison.OrdinalIgnoreCase))
{
@@ -102,27 +96,17 @@ namespace GitHub.Runner.Worker
return false;
}
}
// Currently processing
else
{
// Stop command
if (string.Equals(actionCommand.Command, _stopCommand, StringComparison.OrdinalIgnoreCase))
{
ValidateStopToken(context, actionCommand.Data);
context.Output(input);
context.Debug("Paused processing commands until '##[{actionCommand.Data}]' is received");
_stopToken = actionCommand.Data;
_stopProcessCommand = true;
_registeredCommands.Add(_stopToken);
if (_stopToken.Length > 6)
{
HostContext.SecretMasker.AddValue(_stopToken);
}
context.Output(input);
context.Debug("Paused processing commands until the token you called ::stopCommands:: with is received");
return true;
}
// Found command
else if (_commandExtensions.TryGetValue(actionCommand.Command, out IActionCommandExtension extension))
{
if (context.EchoOnActionCommand && !extension.OmitEcho)
@@ -142,7 +126,6 @@ namespace GitHub.Runner.Worker
context.CommandResult = TaskResult.Failed;
}
}
// Command not found
else
{
context.Warning($"Can't find command extension for ##[{actionCommand.Command}.command].");
@@ -152,45 +135,6 @@ namespace GitHub.Runner.Worker
return true;
}
private void ValidateStopToken(IExecutionContext context, string stopToken)
{
#if OS_WINDOWS
var envContext = context.ExpressionValues["env"] as DictionaryContextData;
#else
var envContext = context.ExpressionValues["env"] as CaseSensitiveDictionaryContextData;
#endif
var allowUnsecureStopCommandTokens = false;
allowUnsecureStopCommandTokens = StringUtil.ConvertToBoolean(Environment.GetEnvironmentVariable(Constants.Variables.Actions.AllowUnsupportedStopCommandTokens));
if (!allowUnsecureStopCommandTokens && envContext.ContainsKey(Constants.Variables.Actions.AllowUnsupportedStopCommandTokens))
{
allowUnsecureStopCommandTokens = StringUtil.ConvertToBoolean(envContext[Constants.Variables.Actions.AllowUnsupportedStopCommandTokens].ToString());
}
bool isTokenInvalid = _registeredCommands.Contains(stopToken)
|| string.IsNullOrEmpty(stopToken)
|| string.Equals(stopToken, "pause-logging", StringComparison.OrdinalIgnoreCase);
if (isTokenInvalid)
{
var telemetry = new JobTelemetry
{
Message = $"Invoked ::stopCommand:: with token: [{stopToken}]",
Type = JobTelemetryType.ActionCommand
};
context.JobTelemetry.Add(telemetry);
}
if (isTokenInvalid && !allowUnsecureStopCommandTokens)
{
throw new Exception(Constants.Runner.UnsupportedStopCommandTokenDisabled);
}
}
internal static bool EnhancedAnnotationsEnabled(IExecutionContext context)
{
return context.Global.Variables.GetBoolean("DistributedTask.EnhancedAnnotations") ?? false;
}
}
public interface IActionCommandExtension : IExtension
@@ -292,7 +236,7 @@ namespace GitHub.Runner.Worker
public const String Name = "name";
}
private string[] _setEnvBlockList =
private string[] _setEnvBlockList =
{
"NODE_OPTIONS"
};
@@ -335,21 +279,8 @@ namespace GitHub.Runner.Worker
{
throw new Exception("Required field 'name' is missing in ##[save-state] command.");
}
// Embedded steps (composite) keep track of the state at the root level
if (context.IsEmbedded)
{
var id = context.EmbeddedId;
if (!context.Root.EmbeddedIntraActionState.ContainsKey(id))
{
context.Root.EmbeddedIntraActionState[id] = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);
}
context.Root.EmbeddedIntraActionState[id][stateName] = command.Data;
}
// Otherwise modify the ExecutionContext
else
{
context.IntraActionState[stateName] = command.Data;
}
context.IntraActionState[stateName] = command.Data;
context.Debug($"Save intra-action state {stateName} = {command.Data}");
}
@@ -393,7 +324,7 @@ namespace GitHub.Runner.Worker
public Type ExtensionType => typeof(IActionCommandExtension);
public void ProcessCommand(IExecutionContext context, string line, ActionCommand command, ContainerInfo container)
{
{
var allowUnsecureCommands = false;
bool.TryParse(Environment.GetEnvironmentVariable(Constants.Variables.Actions.AllowUnsupportedCommands), out allowUnsecureCommands);
@@ -561,13 +492,6 @@ namespace GitHub.Runner.Worker
public override string Command => "error";
}
public sealed class NoticeCommandExtension : IssueCommandExtension
{
public override IssueType Type => IssueType.Notice;
public override string Command => "notice";
}
public abstract class IssueCommandExtension : RunnerService, IActionCommandExtension
{
public abstract IssueType Type { get; }
@@ -582,11 +506,6 @@ namespace GitHub.Runner.Worker
command.Properties.TryGetValue(IssueCommandProperties.Line, out string line);
command.Properties.TryGetValue(IssueCommandProperties.Column, out string column);
if (!ActionCommandManager.EnhancedAnnotationsEnabled(context))
{
context.Debug("Enhanced Annotations not enabled on the server. The 'title', 'end_line', and 'end_column' fields are unsupported.");
}
Issue issue = new Issue()
{
Category = "General",
@@ -638,73 +557,13 @@ namespace GitHub.Runner.Worker
context.AddIssue(issue);
}
public static void ValidateLinesAndColumns(ActionCommand command, IExecutionContext context)
{
command.Properties.TryGetValue(IssueCommandProperties.Line, out string line);
command.Properties.TryGetValue(IssueCommandProperties.EndLine, out string endLine);
command.Properties.TryGetValue(IssueCommandProperties.Column, out string column);
command.Properties.TryGetValue(IssueCommandProperties.EndColumn, out string endColumn);
var hasStartLine = int.TryParse(line, out int lineNumber);
var hasEndLine = int.TryParse(endLine, out int endLineNumber);
var hasStartColumn = int.TryParse(column, out int columnNumber);
var hasEndColumn = int.TryParse(endColumn, out int endColumnNumber);
var hasColumn = hasStartColumn || hasEndColumn;
if (hasEndLine && !hasStartLine)
{
context.Debug($"Invalid {command.Command} command value. '{IssueCommandProperties.EndLine}' can only be set if '{IssueCommandProperties.Line}' is provided");
command.Properties[IssueCommandProperties.Line] = endLine;
hasStartLine = true;
line = endLine;
}
if (hasEndColumn && !hasStartColumn)
{
context.Debug($"Invalid {command.Command} command value. '{IssueCommandProperties.EndColumn}' can only be set if '{IssueCommandProperties.Column}' is provided");
command.Properties[IssueCommandProperties.Column] = endColumn;
hasStartColumn = true;
column = endColumn;
}
if (!hasStartLine && hasColumn)
{
context.Debug($"Invalid {command.Command} command value. '{IssueCommandProperties.Column}' and '{IssueCommandProperties.EndColumn}' can only be set if '{IssueCommandProperties.Line}' value is provided.");
command.Properties.Remove(IssueCommandProperties.Column);
command.Properties.Remove(IssueCommandProperties.EndColumn);
}
if (hasEndLine && line != endLine && hasColumn)
{
context.Debug($"Invalid {command.Command} command value. '{IssueCommandProperties.Column}' and '{IssueCommandProperties.EndColumn}' cannot be set if '{IssueCommandProperties.Line}' and '{IssueCommandProperties.EndLine}' are different values.");
command.Properties.Remove(IssueCommandProperties.Column);
command.Properties.Remove(IssueCommandProperties.EndColumn);
}
if (hasStartLine && hasEndLine && endLineNumber < lineNumber)
{
context.Debug($"Invalid {command.Command} command value. '{IssueCommandProperties.EndLine}' cannot be less than '{IssueCommandProperties.Line}'.");
command.Properties.Remove(IssueCommandProperties.Line);
command.Properties.Remove(IssueCommandProperties.EndLine);
}
if (hasStartColumn && hasEndColumn && endColumnNumber < columnNumber)
{
context.Debug($"Invalid {command.Command} command value. '{IssueCommandProperties.EndColumn}' cannot be less than '{IssueCommandProperties.Column}'.");
command.Properties.Remove(IssueCommandProperties.Column);
command.Properties.Remove(IssueCommandProperties.EndColumn);
}
}
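A minimal standalone sketch of one of the validation rules above, using the same property names ("line", "endLine", "col", "endColumn") but hand-picked sample values; the dictionary and output shown here are illustrative and are not taken from the runner itself.
using System;
using System.Collections.Generic;
class AnnotationRangeDemo
{
    static void Main()
    {
        // Hypothetical annotation command properties: a multi-line range with columns.
        var props = new Dictionary<string, string>
        {
            ["line"] = "10",
            ["endLine"] = "12",
            ["col"] = "5",
            ["endColumn"] = "9",
        };
        bool hasLine = int.TryParse(props.GetValueOrDefault("line"), out int line);
        bool hasEndLine = int.TryParse(props.GetValueOrDefault("endLine"), out int endLine);
        bool hasColumn = props.ContainsKey("col") || props.ContainsKey("endColumn");
        // Mirrors the rule above: columns are dropped when line and endLine differ.
        if (hasLine && hasEndLine && line != endLine && hasColumn)
        {
            props.Remove("col");
            props.Remove("endColumn");
        }
        Console.WriteLine(string.Join(", ", props)); // [line, 10], [endLine, 12]
    }
}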
private static class IssueCommandProperties
{
public const String File = "file";
public const String Line = "line";
public const String EndLine = "endLine";
public const String Column = "col";
public const String EndColumn = "endColumn";
public const String Title = "title";
}
}
public sealed class GroupCommandExtension : GroupingCommandExtension

View File

@@ -37,10 +37,7 @@ namespace GitHub.Runner.Worker
public interface IActionManager : IRunnerService
{
Dictionary<Guid, ContainerInfo> CachedActionContainers { get; }
Dictionary<Guid, List<Pipelines.ActionStep>> CachedEmbeddedPreSteps { get; }
Dictionary<Guid, List<Guid>> CachedEmbeddedStepIds { get; }
Dictionary<Guid, Stack<Pipelines.ActionStep>> CachedEmbeddedPostSteps { get; }
Task<PrepareResult> PrepareActionsAsync(IExecutionContext executionContext, IEnumerable<Pipelines.JobStep> steps, Guid rootStepId = default(Guid));
Task<PrepareResult> PrepareActionsAsync(IExecutionContext executionContext, IEnumerable<Pipelines.JobStep> steps);
Definition LoadAction(IExecutionContext executionContext, Pipelines.ActionStep action);
}
@@ -51,98 +48,35 @@ namespace GitHub.Runner.Worker
//81920 is the default used by System.IO.Stream.CopyTo and is under the large object heap threshold (85k).
private const int _defaultCopyBufferSize = 81920;
private const string _dotcomApiUrl = "https://api.github.com";
private readonly Dictionary<Guid, ContainerInfo> _cachedActionContainers = new Dictionary<Guid, ContainerInfo>();
public Dictionary<Guid, ContainerInfo> CachedActionContainers => _cachedActionContainers;
private readonly Dictionary<Guid, List<Pipelines.ActionStep>> _cachedEmbeddedPreSteps = new Dictionary<Guid, List<Pipelines.ActionStep>>();
public Dictionary<Guid, List<Pipelines.ActionStep>> CachedEmbeddedPreSteps => _cachedEmbeddedPreSteps;
private readonly Dictionary<Guid, List<Guid>> _cachedEmbeddedStepIds = new Dictionary<Guid, List<Guid>>();
public Dictionary<Guid, List<Guid>> CachedEmbeddedStepIds => _cachedEmbeddedStepIds;
private readonly Dictionary<Guid, Stack<Pipelines.ActionStep>> _cachedEmbeddedPostSteps = new Dictionary<Guid, Stack<Pipelines.ActionStep>>();
public Dictionary<Guid, Stack<Pipelines.ActionStep>> CachedEmbeddedPostSteps => _cachedEmbeddedPostSteps;
public async Task<PrepareResult> PrepareActionsAsync(IExecutionContext executionContext, IEnumerable<Pipelines.JobStep> steps, Guid rootStepId = default(Guid))
public async Task<PrepareResult> PrepareActionsAsync(IExecutionContext executionContext, IEnumerable<Pipelines.JobStep> steps)
{
// Assert inputs
ArgUtil.NotNull(executionContext, nameof(executionContext));
ArgUtil.NotNull(steps, nameof(steps));
var state = new PrepareActionsState
{
ImagesToBuild = new Dictionary<string, List<Guid>>(StringComparer.OrdinalIgnoreCase),
ImagesToPull = new Dictionary<string, List<Guid>>(StringComparer.OrdinalIgnoreCase),
ImagesToBuildInfo = new Dictionary<string, ActionContainer>(StringComparer.OrdinalIgnoreCase),
PreStepTracker = new Dictionary<Guid, IActionRunner>()
};
var containerSetupSteps = new List<JobExtensionRunner>();
var depth = 0;
// We are running at the start of a job
if (rootStepId == default(Guid))
{
IOUtil.DeleteDirectory(HostContext.GetDirectory(WellKnownDirectory.Actions), executionContext.CancellationToken);
}
// We are running mid job due to a local composite action
else
{
if (!_cachedEmbeddedStepIds.ContainsKey(rootStepId))
{
_cachedEmbeddedStepIds[rootStepId] = new List<Guid>();
foreach (var compositeStep in steps)
{
var guid = Guid.NewGuid();
compositeStep.Id = guid;
_cachedEmbeddedStepIds[rootStepId].Add(guid);
}
}
depth = 1;
}
IEnumerable<Pipelines.ActionStep> actions = steps.OfType<Pipelines.ActionStep>();
executionContext.Output("Prepare all required actions");
var result = await PrepareActionsRecursiveAsync(executionContext, state, actions, depth, rootStepId);
if (state.ImagesToPull.Count > 0)
Dictionary<string, List<Guid>> imagesToPull = new Dictionary<string, List<Guid>>(StringComparer.OrdinalIgnoreCase);
Dictionary<string, List<Guid>> imagesToBuild = new Dictionary<string, List<Guid>>(StringComparer.OrdinalIgnoreCase);
Dictionary<string, ActionContainer> imagesToBuildInfo = new Dictionary<string, ActionContainer>(StringComparer.OrdinalIgnoreCase);
List<JobExtensionRunner> containerSetupSteps = new List<JobExtensionRunner>();
Dictionary<Guid, IActionRunner> preStepTracker = new Dictionary<Guid, IActionRunner>();
IEnumerable<Pipelines.ActionStep> actions = steps.OfType<Pipelines.ActionStep>();
// TODO: Deprecate the PREVIEW_ACTION_TOKEN
// Log even if we aren't using it to ensure users know.
if (!string.IsNullOrEmpty(executionContext.Global.Variables.Get("PREVIEW_ACTION_TOKEN")))
{
foreach (var imageToPull in result.ImagesToPull)
{
Trace.Info($"{imageToPull.Value.Count} steps need to pull image '{imageToPull.Key}'");
containerSetupSteps.Add(new JobExtensionRunner(runAsync: this.PullActionContainerAsync,
condition: $"{PipelineTemplateConstants.Success}()",
displayName: $"Pull {imageToPull.Key}",
data: new ContainerSetupInfo(imageToPull.Value, imageToPull.Key)));
}
executionContext.Warning("The 'PREVIEW_ACTION_TOKEN' secret is deprecated. Please remove it from the repository's secrets");
}
if (result.ImagesToBuild.Count > 0)
{
foreach (var imageToBuild in result.ImagesToBuild)
{
var setupInfo = result.ImagesToBuildInfo[imageToBuild.Key];
Trace.Info($"{imageToBuild.Value.Count} steps need to build image from '{setupInfo.Dockerfile}'");
containerSetupSteps.Add(new JobExtensionRunner(runAsync: this.BuildActionContainerAsync,
condition: $"{PipelineTemplateConstants.Success}()",
displayName: $"Build {setupInfo.ActionRepository}",
data: new ContainerSetupInfo(imageToBuild.Value, setupInfo.Dockerfile, setupInfo.WorkingDirectory)));
}
}
// Clear the cache (for self-hosted runners)
IOUtil.DeleteDirectory(HostContext.GetDirectory(WellKnownDirectory.Actions), executionContext.CancellationToken);
#if !OS_LINUX
if (containerSetupSteps.Count > 0)
{
executionContext.Output("Container action is only supported on Linux, skip pull and build docker images.");
containerSetupSteps.Clear();
}
#endif
return new PrepareResult(containerSetupSteps, result.PreStepTracker);
}
// todo: Remove when feature flag DistributedTask.NewActionMetadata is removed
var newActionMetadata = executionContext.Global.Variables.GetBoolean("DistributedTask.NewActionMetadata") ?? false;
private async Task<PrepareActionsState> PrepareActionsRecursiveAsync(IExecutionContext executionContext, PrepareActionsState state, IEnumerable<Pipelines.ActionStep> actions, Int32 depth = 0, Guid parentStepId = default(Guid))
{
ArgUtil.NotNull(executionContext, nameof(executionContext));
if (depth > Constants.CompositeActionsMaxDepth)
{
throw new Exception($"Composite action depth exceeded max depth {Constants.CompositeActionsMaxDepth}");
}
var repositoryActions = new List<Pipelines.ActionStep>();
foreach (var action in actions)
@@ -154,15 +88,66 @@ namespace GitHub.Runner.Worker
ArgUtil.NotNull(containerReference, nameof(containerReference));
ArgUtil.NotNullOrEmpty(containerReference.Image, nameof(containerReference.Image));
if (!state.ImagesToPull.ContainsKey(containerReference.Image))
if (!imagesToPull.ContainsKey(containerReference.Image))
{
state.ImagesToPull[containerReference.Image] = new List<Guid>();
imagesToPull[containerReference.Image] = new List<Guid>();
}
Trace.Info($"Action {action.Name} ({action.Id}) needs to pull image '{containerReference.Image}'");
state.ImagesToPull[containerReference.Image].Add(action.Id);
imagesToPull[containerReference.Image].Add(action.Id);
}
else if (action.Reference.Type == Pipelines.ActionSourceType.Repository)
// todo: Remove when feature flag DistributedTask.NewActionMetadata is removed
else if (action.Reference.Type == Pipelines.ActionSourceType.Repository && !newActionMetadata)
{
// only download the repository archive
await DownloadRepositoryActionAsync(executionContext, action);
// more preparation based on content in the repository (action.yml)
var setupInfo = PrepareRepositoryActionAsync(executionContext, action);
if (setupInfo != null)
{
if (!string.IsNullOrEmpty(setupInfo.Image))
{
if (!imagesToPull.ContainsKey(setupInfo.Image))
{
imagesToPull[setupInfo.Image] = new List<Guid>();
}
Trace.Info($"Action {action.Name} ({action.Id}) from repository '{setupInfo.ActionRepository}' needs to pull image '{setupInfo.Image}'");
imagesToPull[setupInfo.Image].Add(action.Id);
}
else
{
ArgUtil.NotNullOrEmpty(setupInfo.ActionRepository, nameof(setupInfo.ActionRepository));
if (!imagesToBuild.ContainsKey(setupInfo.ActionRepository))
{
imagesToBuild[setupInfo.ActionRepository] = new List<Guid>();
}
Trace.Info($"Action {action.Name} ({action.Id}) from repository '{setupInfo.ActionRepository}' needs to build image '{setupInfo.Dockerfile}'");
imagesToBuild[setupInfo.ActionRepository].Add(action.Id);
imagesToBuildInfo[setupInfo.ActionRepository] = setupInfo;
}
}
var repoAction = action.Reference as Pipelines.RepositoryPathReference;
if (repoAction.RepositoryType != Pipelines.PipelineConstants.SelfAlias)
{
var definition = LoadAction(executionContext, action);
if (definition.Data.Execution.HasPre)
{
var actionRunner = HostContext.CreateService<IActionRunner>();
actionRunner.Action = action;
actionRunner.Stage = ActionRunStage.Pre;
actionRunner.Condition = definition.Data.Execution.InitCondition;
Trace.Info($"Add 'pre' execution for {action.Id}");
preStepTracker[action.Id] = actionRunner;
}
}
}
else if (action.Reference.Type == Pipelines.ActionSourceType.Repository && newActionMetadata)
{
repositoryActions.Add(action);
}
@@ -194,96 +179,85 @@ namespace GitHub.Runner.Worker
foreach (var action in repositoryActions)
{
var setupInfo = PrepareRepositoryActionAsync(executionContext, action);
if (setupInfo != null && setupInfo.Container != null)
if (setupInfo != null)
{
if (!string.IsNullOrEmpty(setupInfo.Container.Image))
if (!string.IsNullOrEmpty(setupInfo.Image))
{
if (!state.ImagesToPull.ContainsKey(setupInfo.Container.Image))
if (!imagesToPull.ContainsKey(setupInfo.Image))
{
state.ImagesToPull[setupInfo.Container.Image] = new List<Guid>();
imagesToPull[setupInfo.Image] = new List<Guid>();
}
Trace.Info($"Action {action.Name} ({action.Id}) from repository '{setupInfo.Container.ActionRepository}' needs to pull image '{setupInfo.Container.Image}'");
state.ImagesToPull[setupInfo.Container.Image].Add(action.Id);
Trace.Info($"Action {action.Name} ({action.Id}) from repository '{setupInfo.ActionRepository}' needs to pull image '{setupInfo.Image}'");
imagesToPull[setupInfo.Image].Add(action.Id);
}
else
{
ArgUtil.NotNullOrEmpty(setupInfo.Container.ActionRepository, nameof(setupInfo.Container.ActionRepository));
ArgUtil.NotNullOrEmpty(setupInfo.ActionRepository, nameof(setupInfo.ActionRepository));
if (!state.ImagesToBuild.ContainsKey(setupInfo.Container.ActionRepository))
if (!imagesToBuild.ContainsKey(setupInfo.ActionRepository))
{
state.ImagesToBuild[setupInfo.Container.ActionRepository] = new List<Guid>();
imagesToBuild[setupInfo.ActionRepository] = new List<Guid>();
}
Trace.Info($"Action {action.Name} ({action.Id}) from repository '{setupInfo.Container.ActionRepository}' needs to build image '{setupInfo.Container.Dockerfile}'");
state.ImagesToBuild[setupInfo.Container.ActionRepository].Add(action.Id);
state.ImagesToBuildInfo[setupInfo.Container.ActionRepository] = setupInfo.Container;
Trace.Info($"Action {action.Name} ({action.Id}) from repository '{setupInfo.ActionRepository}' needs to build image '{setupInfo.Dockerfile}'");
imagesToBuild[setupInfo.ActionRepository].Add(action.Id);
imagesToBuildInfo[setupInfo.ActionRepository] = setupInfo;
}
}
else if (setupInfo != null && setupInfo.Steps != null && setupInfo.Steps.Count > 0)
{
state = await PrepareActionsRecursiveAsync(executionContext, state, setupInfo.Steps, depth + 1, action.Id);
}
var repoAction = action.Reference as Pipelines.RepositoryPathReference;
if (repoAction.RepositoryType != Pipelines.PipelineConstants.SelfAlias)
{
var definition = LoadAction(executionContext, action);
if (definition.Data.Execution.HasPre)
{
Trace.Info($"Add 'pre' execution for {action.Id}");
// Root Step
if (depth < 1)
{
var actionRunner = HostContext.CreateService<IActionRunner>();
actionRunner.Action = action;
actionRunner.Stage = ActionRunStage.Pre;
actionRunner.Condition = definition.Data.Execution.InitCondition;
state.PreStepTracker[action.Id] = actionRunner;
}
// Embedded Step
else
{
if (!_cachedEmbeddedPreSteps.ContainsKey(parentStepId))
{
_cachedEmbeddedPreSteps[parentStepId] = new List<Pipelines.ActionStep>();
}
// Clone action so we can modify the condition without affecting the original
var clonedAction = action.Clone() as Pipelines.ActionStep;
clonedAction.Condition = definition.Data.Execution.InitCondition;
_cachedEmbeddedPreSteps[parentStepId].Add(clonedAction);
}
}
var actionRunner = HostContext.CreateService<IActionRunner>();
actionRunner.Action = action;
actionRunner.Stage = ActionRunStage.Pre;
actionRunner.Condition = definition.Data.Execution.InitCondition;
if (definition.Data.Execution.HasPost && depth > 0)
{
if (!_cachedEmbeddedPostSteps.ContainsKey(parentStepId))
{
// If we haven't done so already, add the parent to the post steps
_cachedEmbeddedPostSteps[parentStepId] = new Stack<Pipelines.ActionStep>();
}
// Clone action so we can modify the condition without affecting the original
var clonedAction = action.Clone() as Pipelines.ActionStep;
clonedAction.Condition = definition.Data.Execution.CleanupCondition;
_cachedEmbeddedPostSteps[parentStepId].Push(clonedAction);
Trace.Info($"Add 'pre' execution for {action.Id}");
preStepTracker[action.Id] = actionRunner;
}
}
else if (depth > 0)
{
// if we're in a composite action and haven't loaded the local action yet
// we assume it has a post step
if (!_cachedEmbeddedPostSteps.ContainsKey(parentStepId))
{
// If we haven't done so already, add the parent to the post steps
_cachedEmbeddedPostSteps[parentStepId] = new Stack<Pipelines.ActionStep>();
}
// Clone action so we can modify the condition without affecting the original
var clonedAction = action.Clone() as Pipelines.ActionStep;
_cachedEmbeddedPostSteps[parentStepId].Push(clonedAction);
}
}
}
return state;
if (imagesToPull.Count > 0)
{
foreach (var imageToPull in imagesToPull)
{
Trace.Info($"{imageToPull.Value.Count} steps need to pull image '{imageToPull.Key}'");
containerSetupSteps.Add(new JobExtensionRunner(runAsync: this.PullActionContainerAsync,
condition: $"{PipelineTemplateConstants.Success}()",
displayName: $"Pull {imageToPull.Key}",
data: new ContainerSetupInfo(imageToPull.Value, imageToPull.Key)));
}
}
if (imagesToBuild.Count > 0)
{
foreach (var imageToBuild in imagesToBuild)
{
var setupInfo = imagesToBuildInfo[imageToBuild.Key];
Trace.Info($"{imageToBuild.Value.Count} steps need to build image from '{setupInfo.Dockerfile}'");
containerSetupSteps.Add(new JobExtensionRunner(runAsync: this.BuildActionContainerAsync,
condition: $"{PipelineTemplateConstants.Success}()",
displayName: $"Build {setupInfo.ActionRepository}",
data: new ContainerSetupInfo(imageToBuild.Value, setupInfo.Dockerfile, setupInfo.WorkingDirectory)));
}
}
#if !OS_LINUX
if (containerSetupSteps.Count > 0)
{
executionContext.Output("Container action is only supported on Linux, skip pull and build docker images.");
containerSetupSteps.Clear();
}
#endif
return new PrepareResult(containerSetupSteps, preStepTracker);
}
public Definition LoadAction(IExecutionContext executionContext, Pipelines.ActionStep action)
@@ -428,39 +402,6 @@ namespace GitHub.Runner.Worker
Trace.Verbose($"Details: {StringUtil.ConvertToJson(compositeAction?.Steps)}");
Trace.Info($"Load: {compositeAction.Outputs?.Count ?? 0} number of outputs");
Trace.Info($"Details: {StringUtil.ConvertToJson(compositeAction?.Outputs)}");
if (CachedEmbeddedPreSteps.TryGetValue(action.Id, out var preSteps))
{
compositeAction.PreSteps = preSteps;
}
if (CachedEmbeddedPostSteps.TryGetValue(action.Id, out var postSteps))
{
compositeAction.PostSteps = postSteps;
}
if (_cachedEmbeddedStepIds.ContainsKey(action.Id))
{
for (var i = 0; i < compositeAction.Steps.Count; i++)
{
// Load stored ids so these steps can be loaded as actions later
compositeAction.Steps[i].Id = _cachedEmbeddedStepIds[action.Id][i];
if (string.IsNullOrEmpty(executionContext.Global.Variables.Get("DistributedTask.EnableCompositeActions")) && compositeAction.Steps[i].Reference.Type != Pipelines.ActionSourceType.Script)
{
throw new Exception("`uses:` keyword is not currently supported.");
}
}
}
else
{
_cachedEmbeddedStepIds[action.Id] = new List<Guid>();
foreach (var compositeStep in compositeAction.Steps)
{
var guid = Guid.NewGuid();
compositeStep.Id = guid;
_cachedEmbeddedStepIds[action.Id].Add(guid);
}
}
}
else
{
@@ -633,7 +574,6 @@ namespace GitHub.Runner.Worker
{
NameWithOwner = repositoryReference.Name,
Ref = repositoryReference.Ref,
Path = repositoryReference.Path,
};
})
.ToList();
@@ -656,12 +596,7 @@ namespace GitHub.Runner.Worker
}
catch (Exception ex) when (!executionContext.CancellationToken.IsCancellationRequested) // Do not retry if the run is canceled.
{
// UnresolvableActionDownloadInfoException is a 422 client error, don't retry
// Some possible cases are:
// * Repo is rate limited
// * Repo or tag doesn't exist, or isn't public
// * Policy validation failed
if (attempt < 3 && !(ex is WebApi.UnresolvableActionDownloadInfoException))
if (attempt < 3)
{
executionContext.Output($"Failed to resolve action download info. Error: {ex.Message}");
executionContext.Debug(ex.ToString());
@@ -677,7 +612,6 @@ namespace GitHub.Runner.Worker
// Some possible cases are:
// * Repo is rate limited
// * Repo or tag doesn't exist, or isn't public
// * Policy validation failed
if (ex is WebApi.UnresolvableActionDownloadInfoException)
{
throw;
@@ -713,6 +647,90 @@ namespace GitHub.Runner.Worker
return actionDownloadInfos.Actions;
}
// todo: Remove when feature flag DistributedTask.NewActionMetadata is removed
private async Task DownloadRepositoryActionAsync(IExecutionContext executionContext, Pipelines.ActionStep repositoryAction)
{
Trace.Entering();
ArgUtil.NotNull(executionContext, nameof(executionContext));
var repositoryReference = repositoryAction.Reference as Pipelines.RepositoryPathReference;
ArgUtil.NotNull(repositoryReference, nameof(repositoryReference));
if (string.Equals(repositoryReference.RepositoryType, Pipelines.PipelineConstants.SelfAlias, StringComparison.OrdinalIgnoreCase))
{
Trace.Info($"Repository action is in 'self' repository.");
return;
}
if (!string.Equals(repositoryReference.RepositoryType, Pipelines.RepositoryTypes.GitHub, StringComparison.OrdinalIgnoreCase))
{
throw new NotSupportedException(repositoryReference.RepositoryType);
}
ArgUtil.NotNullOrEmpty(repositoryReference.Name, nameof(repositoryReference.Name));
ArgUtil.NotNullOrEmpty(repositoryReference.Ref, nameof(repositoryReference.Ref));
string destDirectory = Path.Combine(HostContext.GetDirectory(WellKnownDirectory.Actions), repositoryReference.Name.Replace(Path.AltDirectorySeparatorChar, Path.DirectorySeparatorChar), repositoryReference.Ref);
string watermarkFile = GetWatermarkFilePath(destDirectory);
if (File.Exists(watermarkFile))
{
executionContext.Debug($"Action '{repositoryReference.Name}@{repositoryReference.Ref}' already downloaded at '{destDirectory}'.");
return;
}
else
{
// make sure we get a clean folder ready to use.
IOUtil.DeleteDirectory(destDirectory, executionContext.CancellationToken);
Directory.CreateDirectory(destDirectory);
executionContext.Output($"Download action repository '{repositoryReference.Name}@{repositoryReference.Ref}'");
}
var configurationStore = HostContext.GetService<IConfigurationStore>();
var isHostedServer = configurationStore.GetSettings().IsHostedServer;
if (isHostedServer)
{
string apiUrl = GetApiUrl(executionContext);
string archiveLink = BuildLinkToActionArchive(apiUrl, repositoryReference.Name, repositoryReference.Ref);
var downloadDetails = new ActionDownloadDetails(archiveLink, ConfigureAuthorizationFromContext);
await DownloadRepositoryActionAsync(executionContext, downloadDetails, null, destDirectory);
return;
}
else
{
string apiUrl = GetApiUrl(executionContext);
// URLs to try:
var downloadAttempts = new List<ActionDownloadDetails> {
// A built-in action or an action the user has created, on their GHES instance
// Example: https://my-ghes/api/v3/repos/my-org/my-action/tarball/v1
new ActionDownloadDetails(
BuildLinkToActionArchive(apiUrl, repositoryReference.Name, repositoryReference.Ref),
ConfigureAuthorizationFromContext),
// The same action, on GitHub.com
// Example: https://api.github.com/repos/my-org/my-action/tarball/v1
new ActionDownloadDetails(
BuildLinkToActionArchive(_dotcomApiUrl, repositoryReference.Name, repositoryReference.Ref),
configureAuthorization: (e,h) => { /* no authorization for dotcom */ })
};
foreach (var downloadAttempt in downloadAttempts)
{
try
{
await DownloadRepositoryActionAsync(executionContext, downloadAttempt, null, destDirectory);
return;
}
catch (ActionNotFoundException)
{
Trace.Info($"Failed to find the action '{repositoryReference.Name}' at ref '{repositoryReference.Ref}' at {downloadAttempt.ArchiveLink}");
continue;
}
}
throw new ActionNotFoundException($"Failed to find the action '{repositoryReference.Name}' at ref '{repositoryReference.Ref}'. Paths attempted: {string.Join(", ", downloadAttempts.Select(d => d.ArchiveLink))}");
}
}
private async Task DownloadRepositoryActionAsync(IExecutionContext executionContext, WebApi.ActionDownloadInfo downloadInfo)
{
Trace.Entering();
@@ -733,10 +751,10 @@ namespace GitHub.Runner.Worker
// make sure we get a clean folder ready to use.
IOUtil.DeleteDirectory(destDirectory, executionContext.CancellationToken);
Directory.CreateDirectory(destDirectory);
executionContext.Output($"Download action repository '{downloadInfo.NameWithOwner}@{downloadInfo.Ref}' (SHA:{downloadInfo.ResolvedSha})");
executionContext.Output($"Download action repository '{downloadInfo.NameWithOwner}@{downloadInfo.Ref}'");
}
await DownloadRepositoryActionAsync(executionContext, downloadInfo, destDirectory);
await DownloadRepositoryActionAsync(executionContext, null, downloadInfo, destDirectory);
}
private string GetApiUrl(IExecutionContext executionContext)
@@ -759,7 +777,8 @@ namespace GitHub.Runner.Worker
#endif
}
private async Task DownloadRepositoryActionAsync(IExecutionContext executionContext, WebApi.ActionDownloadInfo downloadInfo, string destDirectory)
// todo: Remove the parameter "actionDownloadDetails" when feature flag DistributedTask.NewActionMetadata is removed
private async Task DownloadRepositoryActionAsync(IExecutionContext executionContext, ActionDownloadDetails actionDownloadDetails, WebApi.ActionDownloadInfo downloadInfo, string destDirectory)
{
// Download and extract the action into a temp folder, then rename it on success
string tempDirectory = Path.Combine(HostContext.GetDirectory(WellKnownDirectory.Actions), "_temp_" + Guid.NewGuid());
@@ -767,10 +786,10 @@ namespace GitHub.Runner.Worker
#if OS_WINDOWS
string archiveFile = Path.Combine(tempDirectory, $"{Guid.NewGuid()}.zip");
string link = downloadInfo?.ZipballUrl;
string link = downloadInfo?.ZipballUrl ?? actionDownloadDetails.ArchiveLink;
#else
string archiveFile = Path.Combine(tempDirectory, $"{Guid.NewGuid()}.tar.gz");
string link = downloadInfo?.TarballUrl;
string link = downloadInfo?.TarballUrl ?? actionDownloadDetails.ArchiveLink;
#endif
Trace.Info($"Save archive '{link}' into {archiveFile}.");
@@ -792,7 +811,16 @@ namespace GitHub.Runner.Worker
using (var httpClientHandler = HostContext.CreateHttpClientHandler())
using (var httpClient = new HttpClient(httpClientHandler))
{
httpClient.DefaultRequestHeaders.Authorization = CreateAuthHeader(downloadInfo.Authentication?.Token);
// Legacy
if (downloadInfo == null)
{
actionDownloadDetails.ConfigureAuthorization(executionContext, httpClient);
}
// FF DistributedTask.NewActionMetadata
else
{
httpClient.DefaultRequestHeaders.Authorization = CreateAuthHeader(downloadInfo.Authentication?.Token);
}
httpClient.DefaultRequestHeaders.UserAgent.AddRange(HostContext.UserAgents);
using (var response = await httpClient.GetAsync(link))
@@ -932,6 +960,7 @@ namespace GitHub.Runner.Worker
}
}
// todo: Remove when feature flag DistributedTask.NewActionMetadata is removed
private void ConfigureAuthorizationFromContext(IExecutionContext executionContext, HttpClient httpClient)
{
var authToken = Environment.GetEnvironmentVariable("_GITHUB_ACTION_TOKEN");
@@ -957,7 +986,7 @@ namespace GitHub.Runner.Worker
private string GetWatermarkFilePath(string directory) => directory + ".completed";
private ActionSetupInfo PrepareRepositoryActionAsync(IExecutionContext executionContext, Pipelines.ActionStep repositoryAction)
private ActionContainer PrepareRepositoryActionAsync(IExecutionContext executionContext, Pipelines.ActionStep repositoryAction)
{
var repositoryReference = repositoryAction.Reference as Pipelines.RepositoryPathReference;
if (string.Equals(repositoryReference.RepositoryType, Pipelines.PipelineConstants.SelfAlias, StringComparison.OrdinalIgnoreCase))
@@ -965,8 +994,8 @@ namespace GitHub.Runner.Worker
Trace.Info($"Repository action is in 'self' repository.");
return null;
}
var setupInfo = new ActionSetupInfo();
var actionContainer = new ActionContainer();
var setupInfo = new ActionContainer();
string destDirectory = Path.Combine(HostContext.GetDirectory(WellKnownDirectory.Actions), repositoryReference.Name.Replace(Path.AltDirectorySeparatorChar, Path.DirectorySeparatorChar), repositoryReference.Ref);
string actionEntryDirectory = destDirectory;
string dockerFileRelativePath = repositoryReference.Name;
@@ -975,11 +1004,11 @@ namespace GitHub.Runner.Worker
{
actionEntryDirectory = Path.Combine(destDirectory, repositoryReference.Path);
dockerFileRelativePath = $"{dockerFileRelativePath}/{repositoryReference.Path}";
actionContainer.ActionRepository = $"{repositoryReference.Name}/{repositoryReference.Path}@{repositoryReference.Ref}";
setupInfo.ActionRepository = $"{repositoryReference.Name}/{repositoryReference.Path}@{repositoryReference.Ref}";
}
else
{
actionContainer.ActionRepository = $"{repositoryReference.Name}@{repositoryReference.Ref}";
setupInfo.ActionRepository = $"{repositoryReference.Name}@{repositoryReference.Ref}";
}
// find the docker file or action.yml file
@@ -1009,9 +1038,8 @@ namespace GitHub.Runner.Worker
var dockerFileFullPath = Path.Combine(actionEntryDirectory, containerAction.Image);
executionContext.Debug($"Dockerfile for action: '{dockerFileFullPath}'.");
actionContainer.Dockerfile = dockerFileFullPath;
actionContainer.WorkingDirectory = destDirectory;
setupInfo.Container = actionContainer;
setupInfo.Dockerfile = dockerFileFullPath;
setupInfo.WorkingDirectory = destDirectory;
return setupInfo;
}
else if (containerAction.Image.StartsWith("docker://", StringComparison.OrdinalIgnoreCase))
@@ -1020,8 +1048,7 @@ namespace GitHub.Runner.Worker
executionContext.Debug($"Container image for action: '{actionImage}'.");
actionContainer.Image = actionImage;
setupInfo.Container = actionContainer;
setupInfo.Image = actionImage;
return setupInfo;
}
else
@@ -1041,30 +1068,8 @@ namespace GitHub.Runner.Worker
}
else if (actionDefinitionData.Execution.ExecutionType == ActionExecutionType.Composite)
{
Trace.Info($"Loading Composite steps");
var compositeAction = actionDefinitionData.Execution as CompositeActionExecutionData;
setupInfo.Steps = compositeAction.Steps;
// cache step ids if we have not done so already
if (!_cachedEmbeddedStepIds.ContainsKey(repositoryAction.Id))
{
_cachedEmbeddedStepIds[repositoryAction.Id] = new List<Guid>();
foreach (var compositeStep in compositeAction.Steps)
{
var guid = Guid.NewGuid();
compositeStep.Id = guid;
_cachedEmbeddedStepIds[repositoryAction.Id].Add(guid);
}
}
foreach (var step in compositeAction.Steps)
{
if (string.IsNullOrEmpty(executionContext.Global.Variables.Get("DistributedTask.EnableCompositeActions")) && step.Reference.Type != Pipelines.ActionSourceType.Script)
{
throw new Exception("`uses:` keyword is not currently supported.");
}
}
return setupInfo;
Trace.Info($"Action composite: {(actionDefinitionData.Execution as CompositeActionExecutionData).Steps}, no more preparation.");
return null;
}
else
{
@@ -1074,17 +1079,15 @@ namespace GitHub.Runner.Worker
else if (File.Exists(dockerFile))
{
executionContext.Debug($"Dockerfile for action: '{dockerFile}'.");
actionContainer.Dockerfile = dockerFile;
actionContainer.WorkingDirectory = destDirectory;
setupInfo.Container = actionContainer;
setupInfo.Dockerfile = dockerFile;
setupInfo.WorkingDirectory = destDirectory;
return setupInfo;
}
else if (File.Exists(dockerFileLowerCase))
{
executionContext.Debug($"Dockerfile for action: '{dockerFileLowerCase}'.");
actionContainer.Dockerfile = dockerFileLowerCase;
actionContainer.WorkingDirectory = destDirectory;
setupInfo.Container = actionContainer;
setupInfo.Dockerfile = dockerFileLowerCase;
setupInfo.WorkingDirectory = destDirectory;
return setupInfo;
}
else
@@ -1137,6 +1140,20 @@ namespace GitHub.Runner.Worker
HostContext.SecretMasker.AddValue(base64EncodingToken);
return new AuthenticationHeaderValue("Basic", base64EncodingToken);
}
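For reference, a Basic authorization header of this shape is built by base64-encoding a username:token pair; the "x-access-token" prefix in the sketch below is an assumption for illustration only, since the exact pairing the runner uses is not shown in this hunk.
using System;
using System.Net.Http.Headers;
using System.Text;
class AuthHeaderSketch
{
    static AuthenticationHeaderValue CreateBasicHeader(string token)
    {
        // Assumed username convention for illustration; the runner's real pairing may differ.
        var raw = $"x-access-token:{token}";
        var base64 = Convert.ToBase64String(Encoding.UTF8.GetBytes(raw));
        return new AuthenticationHeaderValue("Basic", base64);
    }
    static void Main()
    {
        // Prints something like: Basic eC1hY2Nl...
        Console.WriteLine(CreateBasicHeader("ghp_example"));
    }
}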
// todo: Remove when feature flag DistributedTask.NewActionMetadata is removed
private class ActionDownloadDetails
{
public string ArchiveLink { get; }
public Action<IExecutionContext, HttpClient> ConfigureAuthorization { get; }
public ActionDownloadDetails(string archiveLink, Action<IExecutionContext, HttpClient> configureAuthorization)
{
ArchiveLink = archiveLink;
ConfigureAuthorization = configureAuthorization;
}
}
}
public sealed class Definition
@@ -1199,8 +1216,6 @@ namespace GitHub.Runner.Worker
public string Pre { get; set; }
public string Post { get; set; }
public string NodeVersion { get; set; }
}
public sealed class PluginActionExecutionData : ActionExecutionData
@@ -1226,11 +1241,9 @@ namespace GitHub.Runner.Worker
public sealed class CompositeActionExecutionData : ActionExecutionData
{
public override ActionExecutionType ExecutionType => ActionExecutionType.Composite;
public override bool HasPre => PreSteps.Count > 0;
public override bool HasPost => PostSteps.Count > 0;
public List<Pipelines.ActionStep> PreSteps { get; set; }
public override bool HasPre => false;
public override bool HasPost => false;
public List<Pipelines.ActionStep> Steps { get; set; }
public Stack<Pipelines.ActionStep> PostSteps { get; set; }
public MappingToken Outputs { get; set; }
}
@@ -1290,18 +1303,4 @@ namespace GitHub.Runner.Worker
public string WorkingDirectory { get; set; }
public string ActionRepository { get; set; }
}
public class ActionSetupInfo
{
public ActionContainer Container { get; set; }
public List<Pipelines.ActionStep> Steps { get; set; }
}
public class PrepareActionsState
{
public Dictionary<string, List<Guid>> ImagesToPull;
public Dictionary<string, List<Guid>> ImagesToBuild;
public Dictionary<string, ActionContainer> ImagesToBuildInfo;
public Dictionary<Guid, IActionRunner> PreStepTracker;
}
}

View File

@@ -451,8 +451,7 @@ namespace GitHub.Runner.Worker
};
}
}
else if (string.Equals(usingToken.Value, "node12", StringComparison.OrdinalIgnoreCase)||
string.Equals(usingToken.Value, "node16", StringComparison.OrdinalIgnoreCase))
else if (string.Equals(usingToken.Value, "node12", StringComparison.OrdinalIgnoreCase))
{
if (string.IsNullOrEmpty(mainToken?.Value))
{
@@ -462,7 +461,6 @@ namespace GitHub.Runner.Worker
{
return new NodeJSActionExecutionData()
{
NodeVersion = usingToken.Value,
Script = mainToken.Value,
Pre = preToken?.Value,
InitCondition = preIfToken?.Value ?? "always()",
@@ -482,17 +480,13 @@ namespace GitHub.Runner.Worker
return new CompositeActionExecutionData()
{
Steps = steps.Cast<Pipelines.ActionStep>().ToList(),
PreSteps = new List<Pipelines.ActionStep>(),
PostSteps = new Stack<Pipelines.ActionStep>(),
InitCondition = "always()",
CleanupCondition = "always()",
Outputs = outputs
};
}
}
else
{
throw new ArgumentOutOfRangeException($"'using: {usingToken.Value}' is not supported, use 'docker', 'node12' or 'node16' instead.");
throw new ArgumentOutOfRangeException($"'using: {usingToken.Value}' is not supported, use 'docker' or 'node12' instead.");
}
}
else if (pluginToken != null)

View File

@@ -82,28 +82,6 @@ namespace GitHub.Runner.Worker
ActionExecutionData handlerData = definition.Data?.Execution;
ArgUtil.NotNull(handlerData, nameof(handlerData));
List<JobExtensionRunner> localActionContainerSetupSteps = null;
// Handle Composite Local Actions
// Need to download and expand the tree of referenced actions
if (handlerData.ExecutionType == ActionExecutionType.Composite &&
handlerData is CompositeActionExecutionData compositeHandlerData &&
Stage == ActionRunStage.Main &&
Action.Reference is Pipelines.RepositoryPathReference localAction &&
string.Equals(localAction.RepositoryType, Pipelines.PipelineConstants.SelfAlias, StringComparison.OrdinalIgnoreCase))
{
var actionManager = HostContext.GetService<IActionManager>();
var prepareResult = await actionManager.PrepareActionsAsync(ExecutionContext, compositeHandlerData.Steps, ExecutionContext.Id);
// Reload definition since post may exist now (from embedded steps that were JIT downloaded)
definition = taskManager.LoadAction(ExecutionContext, Action);
ArgUtil.NotNull(definition, nameof(definition));
handlerData = definition.Data?.Execution;
ArgUtil.NotNull(handlerData, nameof(handlerData));
// Save container setup steps so we can reference them later
localActionContainerSetupSteps = prepareResult.ContainerSetupSteps;
}
if (handlerData.HasPre &&
Action.Reference is Pipelines.RepositoryPathReference repoAction &&
string.Equals(repoAction.RepositoryType, Pipelines.PipelineConstants.SelfAlias, StringComparison.OrdinalIgnoreCase))
@@ -271,8 +249,7 @@ namespace GitHub.Runner.Worker
inputs,
environment,
ExecutionContext.Global.Variables,
actionDirectory: definition.Directory,
localActionContainerSetupSteps: localActionContainerSetupSteps);
actionDirectory: definition.Directory);
// Print out action details
handler.PrintActionDetails(Stage);

View File

@@ -1,47 +0,0 @@
using System;
using System.Text;
using GitHub.DistributedTask.Pipelines;
using GitHub.Runner.Common;
using GitHub.Runner.Sdk;
using ObjectTemplating = GitHub.DistributedTask.ObjectTemplating;
namespace GitHub.Runner.Worker
{
public sealed class ConditionTraceWriter : ObjectTemplating::ITraceWriter
{
private readonly IExecutionContext _executionContext;
private readonly Tracing _trace;
private readonly StringBuilder _traceBuilder = new StringBuilder();
public string Trace => _traceBuilder.ToString();
public ConditionTraceWriter(Tracing trace, IExecutionContext executionContext)
{
ArgUtil.NotNull(trace, nameof(trace));
_trace = trace;
_executionContext = executionContext;
}
public void Error(string format, params Object[] args)
{
var message = StringUtil.Format(format, args);
_trace.Error(message);
_executionContext?.Debug(message);
}
public void Info(string format, params Object[] args)
{
var message = StringUtil.Format(format, args);
_trace.Info(message);
_executionContext?.Debug(message);
_traceBuilder.AppendLine(message);
}
public void Verbose(string format, params Object[] args)
{
var message = StringUtil.Format(format, args);
_trace.Verbose(message);
_executionContext?.Debug(message);
}
}
}

View File

@@ -46,7 +46,7 @@ namespace GitHub.Runner.Worker.Container
{
base.Initialize(hostContext);
DockerPath = WhichUtil.Which("docker", true, Trace);
DockerInstanceLabel = IOUtil.GetSha256Hash(hostContext.GetDirectory(WellKnownDirectory.Root)).Substring(0, 6);
DockerInstanceLabel = IOUtil.GetPathHash(hostContext.GetDirectory(WellKnownDirectory.Root)).Substring(0, 6);
}
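A hedged sketch of what this label amounts to: a short, stable identifier derived from hashing the runner's root directory, so containers created by one runner install can be told apart from another's. The hashing helper below is a stand-in, not the runner's IOUtil implementation, and the path is hypothetical.
using System;
using System.Security.Cryptography;
using System.Text;
class InstanceLabelSketch
{
    // Stand-in for a SHA-256 path hash: hex-encoded digest of the path string.
    static string Sha256Hex(string value)
    {
        using var sha = SHA256.Create();
        var bytes = sha.ComputeHash(Encoding.UTF8.GetBytes(value));
        return BitConverter.ToString(bytes).Replace("-", string.Empty).ToLowerInvariant();
    }
    static void Main()
    {
        string rootDirectory = "/home/runner/actions-runner"; // hypothetical install path
        string label = Sha256Hex(rootDirectory).Substring(0, 6);
        Console.WriteLine(label); // six hex characters, stable for this install path
    }
}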
public async Task<DockerVersion> DockerVersion(IExecutionContext context)
@@ -131,11 +131,11 @@ namespace GitHub.Runner.Worker.Container
{
if (String.IsNullOrEmpty(env.Value))
{
dockerOptions.Add(DockerUtil.CreateEscapedOption("-e", env.Key));
dockerOptions.Add($"-e \"{env.Key}\"");
}
else
{
dockerOptions.Add(DockerUtil.CreateEscapedOption("-e", env.Key, env.Value));
dockerOptions.Add($"-e \"{env.Key}={env.Value.Replace("\"", "\\\"")}\"");
}
}
@@ -202,7 +202,7 @@ namespace GitHub.Runner.Worker.Container
{
// e.g. -e MY_SECRET maps the value into the exec'ed process without exposing
// the value directly in the command
dockerOptions.Add(DockerUtil.CreateEscapedOption("-e", env.Key));
dockerOptions.Add($"-e {env.Key}");
}
// Watermark for GitHub Action environment

View File

@@ -6,9 +6,6 @@ namespace GitHub.Runner.Worker.Container
{
public class DockerUtil
{
private static readonly Regex QuoteEscape = new Regex(@"(\\*)" + "\"", RegexOptions.Compiled);
private static readonly Regex EndOfStringEscape = new Regex(@"(\\+)$", RegexOptions.Compiled);
public static List<PortMapping> ParseDockerPort(IList<string> portMappingLines)
{
const string targetPort = "targetPort";
@@ -20,7 +17,7 @@ namespace GitHub.Runner.Worker.Container
string pattern = $"^(?<{targetPort}>\\d+)/(?<{proto}>\\w+) -> (?<{host}>.+):(?<{hostPort}>\\d+)$";
List<PortMapping> portMappings = new List<PortMapping>();
foreach (var line in portMappingLines)
foreach(var line in portMappingLines)
{
Match m = Regex.Match(line, pattern, RegexOptions.None, TimeSpan.FromSeconds(1));
if (m.Success)
@@ -64,44 +61,5 @@ namespace GitHub.Runner.Worker.Container
}
return "";
}
public static string CreateEscapedOption(string flag, string key)
{
if (String.IsNullOrEmpty(key))
{
return "";
}
return $"{flag} {EscapeString(key)}";
}
public static string CreateEscapedOption(string flag, string key, string value)
{
if (String.IsNullOrEmpty(key))
{
return "";
}
var escapedString = EscapeString($"{key}={value}");
return $"{flag} {escapedString}";
}
private static string EscapeString(string value)
{
if (String.IsNullOrEmpty(value))
{
return "";
}
// Dotnet escaping rules are tricky here: we can only escape \ if it precedes a "
// If a double quotation mark follows two or an even number of backslashes, each preceding backslash pair is replaced with one backslash and the double quotation mark is removed.
// If a double quotation mark follows an odd number of backslashes, including just one, each preceding pair is replaced with one backslash and the remaining backslash is removed; however, in this case the double quotation mark is not removed.
// https://docs.microsoft.com/en-us/dotnet/api/system.environment.getcommandlineargs?redirectedfrom=MSDN&view=net-6.0#remarks
// First, find any \ followed by a " and double the number of \ + 1.
value = QuoteEscape.Replace(value, @"$1$1\" + "\"");
// Next, if the value ends in `\`, that trailing backslash would escape the closing quote, so we need to detect it at the end of the string and perform the same escape
// Luckily, we can just use the $ anchor, which matches the end of the string in regex
value = EndOfStringEscape.Replace(value, @"$1$1");
// Finally, wrap it in quotes
return $"\"{value}\"";
}
}
}
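To make the escaping rules above concrete, here is a small self-contained sketch of the same regex approach with a couple of worked inputs; it mirrors the logic shown in the hunk rather than calling the runner's DockerUtil, and the sample values are invented.
using System;
using System.Text.RegularExpressions;
class DockerEscapeSketch
{
    static readonly Regex QuoteEscape = new Regex(@"(\\*)" + "\"", RegexOptions.Compiled);
    static readonly Regex EndOfStringEscape = new Regex(@"(\\+)$", RegexOptions.Compiled);
    static string EscapeString(string value)
    {
        if (string.IsNullOrEmpty(value)) return "";
        // Double any run of backslashes that precedes a quote, then escape the quote itself.
        value = QuoteEscape.Replace(value, @"$1$1\" + "\"");
        // Double a trailing run of backslashes so it cannot escape the closing quote.
        value = EndOfStringEscape.Replace(value, @"$1$1");
        return $"\"{value}\"";
    }
    static void Main()
    {
        Console.WriteLine(EscapeString("FOO=say \"hi\""));    // "FOO=say \"hi\""
        Console.WriteLine(EscapeString(@"PATH_SUFFIX=dir\")); // "PATH_SUFFIX=dir\\"
    }
}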

View File

@@ -494,8 +494,7 @@ namespace GitHub.Runner.Worker
private void UpdateRegistryAuthForGitHubToken(IExecutionContext executionContext, ContainerInfo container)
{
var registryIsTokenCompatible = container.RegistryServer.Equals("ghcr.io", StringComparison.OrdinalIgnoreCase) || container.RegistryServer.Equals("containers.pkg.github.com", StringComparison.OrdinalIgnoreCase);
var isFallbackTokenFromHostedGithub = HostContext.GetService<IConfigurationStore>().GetSettings().IsHostedServer;
if (!registryIsTokenCompatible || !isFallbackTokenFromHostedGithub)
if (!registryIsTokenCompatible)
{
return;
}

View File

@@ -36,11 +36,8 @@ namespace GitHub.Runner.Worker
public interface IExecutionContext : IRunnerService
{
Guid Id { get; }
Guid EmbeddedId { get; }
string ScopeName { get; }
string SiblingScopeName { get; }
string ContextName { get; }
ActionRunStage Stage { get; }
Task ForceCompleted { get; }
TaskResult? Result { get; set; }
TaskResult? Outcome { get; set; }
@@ -52,8 +49,6 @@ namespace GitHub.Runner.Worker
Dictionary<string, string> IntraActionState { get; }
Dictionary<string, VariableValue> JobOutputs { get; }
ActionsEnvironmentReference ActionsEnvironment { get; }
List<ActionsStepTelemetry> ActionsStepsTelemetry { get; }
List<JobTelemetry> JobTelemetry { get; }
DictionaryContextData ExpressionValues { get; }
IList<IFunctionInfo> ExpressionFunctions { get; }
JobContext JobContext { get; }
@@ -63,10 +58,6 @@ namespace GitHub.Runner.Worker
// Only job level ExecutionContext has PostJobSteps
Stack<IStep> PostJobSteps { get; }
Dictionary<Guid, string> EmbeddedStepsWithPostRegistered { get; }
// Keep track of embedded steps states
Dictionary<Guid, Dictionary<string, string>> EmbeddedIntraActionState { get; }
bool EchoOnActionCommand { get; set; }
@@ -77,8 +68,8 @@ namespace GitHub.Runner.Worker
// Initialize
void InitializeJob(Pipelines.AgentJobRequestMessage message, CancellationToken token);
void CancelToken();
IExecutionContext CreateChild(Guid recordId, string displayName, string refName, string scopeName, string contextName, ActionRunStage stage, Dictionary<string, string> intraActionState = null, int? recordOrder = null, IPagingLogger logger = null, bool isEmbedded = false, CancellationTokenSource cancellationTokenSource = null, Guid embeddedId = default(Guid), string siblingScopeName = null);
IExecutionContext CreateEmbeddedChild(string scopeName, string contextName, Guid embeddedId, ActionRunStage stage, Dictionary<string, string> intraActionState = null, string siblingScopeName = null);
IExecutionContext CreateChild(Guid recordId, string displayName, string refName, string scopeName, string contextName, Dictionary<string, string> intraActionState = null, int? recordOrder = null, IPagingLogger logger = null, bool isEmbedded = false, CancellationTokenSource cancellationTokenSource = null);
IExecutionContext CreateEmbeddedChild(string scopeName, string contextName);
// logging
long Write(string tag, string message);
@@ -141,19 +132,14 @@ namespace GitHub.Runner.Worker
private long _totalThrottlingDelayInMilliseconds = 0;
public Guid Id => _record.Id;
public Guid EmbeddedId { get; private set; }
public string ScopeName { get; private set; }
public string SiblingScopeName { get; private set; }
public string ContextName { get; private set; }
public ActionRunStage Stage { get; private set; }
public Task ForceCompleted => _forceCompleted.Task;
public CancellationToken CancellationToken => _cancellationTokenSource.Token;
public Dictionary<string, string> IntraActionState { get; private set; }
public Dictionary<string, VariableValue> JobOutputs { get; private set; }
public ActionsEnvironmentReference ActionsEnvironment { get; private set; }
public List<ActionsStepTelemetry> ActionsStepsTelemetry { get; private set; }
public List<JobTelemetry> JobTelemetry { get; private set; }
public DictionaryContextData ExpressionValues { get; } = new DictionaryContextData();
public IList<IFunctionInfo> ExpressionFunctions { get; } = new List<IFunctionInfo>();
@@ -169,11 +155,6 @@ namespace GitHub.Runner.Worker
// Only job level ExecutionContext has StepsWithPostRegistered
public HashSet<Guid> StepsWithPostRegistered { get; private set; }
// Only job level ExecutionContext has EmbeddedStepsWithPostRegistered
public Dictionary<Guid, string> EmbeddedStepsWithPostRegistered { get; private set; }
public Dictionary<Guid, Dictionary<string, string>> EmbeddedIntraActionState { get; private set; }
public bool EchoOnActionCommand { get; set; }
// An embedded execution context shares the same record ID, record name, and logger
@@ -264,37 +245,17 @@ namespace GitHub.Runner.Worker
public void RegisterPostJobStep(IStep step)
{
string siblingScopeName = null;
if (this.IsEmbedded)
{
if (step is IActionRunner actionRunner)
{
if (Root.EmbeddedStepsWithPostRegistered.ContainsKey(actionRunner.Action.Id))
{
Trace.Info($"'post' of '{actionRunner.DisplayName}' already push to child post step stack.");
}
else
{
Root.EmbeddedStepsWithPostRegistered[actionRunner.Action.Id] = actionRunner.Condition;
}
return;
}
}
else if (step is IActionRunner actionRunner && !Root.StepsWithPostRegistered.Add(actionRunner.Action.Id))
if (step is IActionRunner actionRunner && !Root.StepsWithPostRegistered.Add(actionRunner.Action.Id))
{
Trace.Info($"'post' of '{actionRunner.DisplayName}' already push to post step stack.");
return;
}
if (step is IActionRunner runner)
{
siblingScopeName = runner.Action.ContextName;
}
step.ExecutionContext = Root.CreatePostChild(step.DisplayName, IntraActionState, siblingScopeName);
step.ExecutionContext = Root.CreatePostChild(step.DisplayName, IntraActionState);
Root.PostJobSteps.Push(step);
}
public IExecutionContext CreateChild(Guid recordId, string displayName, string refName, string scopeName, string contextName, ActionRunStage stage, Dictionary<string, string> intraActionState = null, int? recordOrder = null, IPagingLogger logger = null, bool isEmbedded = false, CancellationTokenSource cancellationTokenSource = null, Guid embeddedId = default(Guid), string siblingScopeName = null)
public IExecutionContext CreateChild(Guid recordId, string displayName, string refName, string scopeName, string contextName, Dictionary<string, string> intraActionState = null, int? recordOrder = null, IPagingLogger logger = null, bool isEmbedded = false, CancellationTokenSource cancellationTokenSource = null)
{
Trace.Entering();
@@ -303,10 +264,6 @@ namespace GitHub.Runner.Worker
child.Global = Global;
child.ScopeName = scopeName;
child.ContextName = contextName;
child.Stage = stage;
child.EmbeddedId = embeddedId;
child.SiblingScopeName = siblingScopeName;
child.JobTelemetry = JobTelemetry;
if (intraActionState == null)
{
child.IntraActionState = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);
@@ -354,9 +311,9 @@ namespace GitHub.Runner.Worker
/// An embedded execution context shares the same record ID, record name, logger,
/// and a linked cancellation token.
/// </summary>
public IExecutionContext CreateEmbeddedChild(string scopeName, string contextName, Guid embeddedId, ActionRunStage stage, Dictionary<string, string> intraActionState = null, string siblingScopeName = null)
public IExecutionContext CreateEmbeddedChild(string scopeName, string contextName)
{
return Root.CreateChild(_record.Id, _record.Name, _record.Id.ToString("N"), scopeName, contextName, stage, logger: _logger, isEmbedded: true, cancellationTokenSource: CancellationTokenSource.CreateLinkedTokenSource(_cancellationTokenSource.Token), intraActionState: intraActionState, embeddedId: embeddedId, siblingScopeName: siblingScopeName);
return Root.CreateChild(_record.Id, _record.Name, _record.Id.ToString("N"), scopeName, contextName, logger: _logger, isEmbedded: true, cancellationTokenSource: CancellationTokenSource.CreateLinkedTokenSource(_cancellationTokenSource.Token));
}
public void Start(string currentOperation = null)
@@ -412,7 +369,7 @@ namespace GitHub.Runner.Worker
_logger.End();
// Skip if generated context name. Generated context names start with "__". After 3.2 the server will never send an empty context name.
// Skip if generated context name. Generated context names start with "__". After M271-ish the server will never send an empty context name.
if (!string.IsNullOrEmpty(ContextName) && !ContextName.StartsWith("__", StringComparison.Ordinal))
{
Global.StepsContext.SetOutcome(ScopeName, ContextName, (Outcome ?? Result ?? TaskResult.Succeeded).ToActionResult());
@@ -475,7 +432,7 @@ namespace GitHub.Runner.Worker
{
ArgUtil.NotNullOrEmpty(name, nameof(name));
// Skip if generated context name. Generated context names start with "__". After 3.2 the server will never send an empty context name.
// Skip if generated context name. Generated context names start with "__". After M271-ish the server will never send an empty context name.
if (string.IsNullOrEmpty(ContextName) || ContextName.StartsWith("__", StringComparison.Ordinal))
{
reference = null;
@@ -554,24 +511,6 @@ namespace GitHub.Runner.Worker
_record.WarningCount++;
}
else if (issue.Type == IssueType.Notice)
{
// Track the line number for each issue in the log file;
// the log UI uses this to navigate from the issue to the log
if (!string.IsNullOrEmpty(logMessage))
{
long logLineNumber = Write(WellKnownTags.Notice, logMessage);
issue.Data["logFileLineNumber"] = logLineNumber.ToString();
}
if (_record.NoticeCount < _maxIssueCount)
{
_record.Issues.Add(issue);
}
_record.NoticeCount++;
}
_jobServerQueue.QueueTimelineRecordUpdate(_mainTimelineId, _record);
}
@@ -660,11 +599,6 @@ namespace GitHub.Runner.Worker
// Actions environment
ActionsEnvironment = message.ActionsEnvironment;
// ActionsStepTelemetry
ActionsStepsTelemetry = new List<ActionsStepTelemetry>();
JobTelemetry = new List<JobTelemetry>();
// Service container info
Global.ServiceContainers = new List<ContainerInfo>();
@@ -725,12 +659,6 @@ namespace GitHub.Runner.Worker
// StepsWithPostRegistered for job ExecutionContext
StepsWithPostRegistered = new HashSet<Guid>();
// EmbeddedStepsWithPostRegistered for job ExecutionContext
EmbeddedStepsWithPostRegistered = new Dictionary<Guid, string>();
// EmbeddedIntraActionState for job ExecutionContext
EmbeddedIntraActionState = new Dictionary<Guid, Dictionary<string, string>>();
// Job timeline record.
InitializeTimelineRecord(
timelineId: message.Timeline.Id,
@@ -907,7 +835,6 @@ namespace GitHub.Runner.Worker
_record.State = TimelineRecordState.Pending;
_record.ErrorCount = 0;
_record.WarningCount = 0;
_record.NoticeCount = 0;
if (parentTimelineRecordId != null && parentTimelineRecordId.Value != Guid.Empty)
{
@@ -937,7 +864,7 @@ namespace GitHub.Runner.Worker
}
}
private IExecutionContext CreatePostChild(string displayName, Dictionary<string, string> intraActionState, string siblingScopeName = null)
private IExecutionContext CreatePostChild(string displayName, Dictionary<string, string> intraActionState)
{
if (!_expandedForPostJob)
{
@@ -947,7 +874,7 @@ namespace GitHub.Runner.Worker
}
var newGuid = Guid.NewGuid();
return CreateChild(newGuid, displayName, newGuid.ToString("N"), null, null, ActionRunStage.Post, intraActionState, _childTimelineRecordOrder - Root.PostJobSteps.Count, siblingScopeName: siblingScopeName);
return CreateChild(newGuid, displayName, newGuid.ToString("N"), null, null, intraActionState, _childTimelineRecordOrder - Root.PostJobSteps.Count);
}
}
@@ -980,7 +907,7 @@ namespace GitHub.Runner.Worker
// Do not add a format string overload. See comment on ExecutionContext.Write().
public static void InfrastructureError(this IExecutionContext context, string message)
{
context.AddIssue(new Issue() { Type = IssueType.Error, Message = message, IsInfrastructureIssue = true });
context.AddIssue(new Issue() { Type = IssueType.Error, Message = message, IsInfrastructureIssue = true});
}
// Do not add a format string overload. See comment on ExecutionContext.Write().
@@ -1079,7 +1006,6 @@ namespace GitHub.Runner.Worker
public static readonly string Command = "##[command]";
public static readonly string Error = "##[error]";
public static readonly string Warning = "##[warning]";
public static readonly string Notice = "##[notice]";
public static readonly string Debug = "##[debug]";
}
}

View File

@@ -24,19 +24,8 @@ namespace GitHub.Runner.Worker.Expressions
ArgUtil.NotNull(templateContext, nameof(templateContext));
var executionContext = templateContext.State[nameof(IExecutionContext)] as IExecutionContext;
ArgUtil.NotNull(executionContext, nameof(executionContext));
// Decide based on 'action_status' for composite MAIN steps and 'job.status' for pre, post and job-level steps
var isCompositeMainStep = executionContext.IsEmbedded && executionContext.Stage == ActionRunStage.Main;
if (isCompositeMainStep)
{
ActionResult actionStatus = EnumUtil.TryParse<ActionResult>(executionContext.GetGitHubContext("action_status")) ?? ActionResult.Success;
return actionStatus == ActionResult.Failure;
}
else
{
ActionResult jobStatus = executionContext.JobContext.Status ?? ActionResult.Success;
return jobStatus == ActionResult.Failure;
}
ActionResult jobStatus = executionContext.JobContext.Status ?? ActionResult.Success;
return jobStatus == ActionResult.Failure;
}
}
}

View File

@@ -24,19 +24,8 @@ namespace GitHub.Runner.Worker.Expressions
ArgUtil.NotNull(templateContext, nameof(templateContext));
var executionContext = templateContext.State[nameof(IExecutionContext)] as IExecutionContext;
ArgUtil.NotNull(executionContext, nameof(executionContext));
// Decide based on 'action_status' for composite MAIN steps and 'job.status' for pre, post and job-level steps
var isCompositeMainStep = executionContext.IsEmbedded && executionContext.Stage == ActionRunStage.Main;
if (isCompositeMainStep)
{
ActionResult actionStatus = EnumUtil.TryParse<ActionResult>(executionContext.GetGitHubContext("action_status")) ?? ActionResult.Success;
return actionStatus == ActionResult.Success;
}
else
{
ActionResult jobStatus = executionContext.JobContext.Status ?? ActionResult.Success;
return jobStatus == ActionResult.Success;
}
ActionResult jobStatus = executionContext.JobContext.Status ?? ActionResult.Success;
return jobStatus == ActionResult.Success;
}
}
}

View File

@@ -23,13 +23,9 @@ namespace GitHub.Runner.Worker
"job",
"path",
"ref",
"ref_name",
"ref_protected",
"ref_type",
"repository",
"repository_owner",
"retention_days",
"run_attempt",
"run_id",
"run_number",
"server_url",
@@ -42,16 +38,9 @@ namespace GitHub.Runner.Worker
{
foreach (var data in this)
{
if (_contextEnvAllowlist.Contains(data.Key))
if (_contextEnvAllowlist.Contains(data.Key) && data.Value is StringContextData value)
{
if (data.Value is StringContextData value)
{
yield return new KeyValuePair<string, string>($"GITHUB_{data.Key.ToUpperInvariant()}", value);
}
else if (data.Value is BooleanContextData booleanValue)
{
yield return new KeyValuePair<string, string>($"GITHUB_{data.Key.ToUpperInvariant()}", booleanValue.ToString());
}
yield return new KeyValuePair<string, string>($"GITHUB_{data.Key.ToUpperInvariant()}", value);
}
}
}
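A small hedged illustration of the mapping this allowlist drives: allowlisted context keys are exported as upper-cased GITHUB_* environment variables, while everything else is skipped. The dictionary below is hand-made sample data, not the runner's real github context.
using System;
using System.Collections.Generic;
using System.Linq;
class GitHubEnvSketch
{
    static void Main()
    {
        // Hypothetical subset of the allowlisted keys and sample values.
        var allowlist = new HashSet<string>(StringComparer.OrdinalIgnoreCase) { "repository", "ref", "run_id" };
        var context = new Dictionary<string, string>
        {
            ["repository"] = "actions/runner",
            ["ref"] = "refs/heads/main",
            ["run_id"] = "1234567890",
            ["token"] = "***", // not allowlisted, so never exported
        };
        var exported = context
            .Where(kv => allowlist.Contains(kv.Key))
            .Select(kv => new KeyValuePair<string, string>($"GITHUB_{kv.Key.ToUpperInvariant()}", kv.Value));
        foreach (var kv in exported)
        {
            Console.WriteLine($"{kv.Key}={kv.Value}"); // e.g. GITHUB_REPOSITORY=actions/runner
        }
    }
}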

View File

@@ -5,16 +5,11 @@ using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using GitHub.DistributedTask.Expressions2;
using GitHub.DistributedTask.ObjectTemplating.Tokens;
using GitHub.DistributedTask.Pipelines.ContextData;
using GitHub.DistributedTask.Pipelines.ObjectTemplating;
using GitHub.DistributedTask.WebApi;
using GitHub.Runner.Common;
using GitHub.Runner.Common.Util;
using GitHub.Runner.Sdk;
using GitHub.Runner.Worker;
using GitHub.Runner.Worker.Expressions;
using Pipelines = GitHub.DistributedTask.Pipelines;
@@ -35,68 +30,8 @@ namespace GitHub.Runner.Worker.Handlers
Trace.Entering();
ArgUtil.NotNull(ExecutionContext, nameof(ExecutionContext));
ArgUtil.NotNull(Inputs, nameof(Inputs));
ArgUtil.NotNull(Data.Steps, nameof(Data.Steps));
List<Pipelines.ActionStep> steps;
if (stage == ActionRunStage.Pre)
{
ArgUtil.NotNull(Data.PreSteps, nameof(Data.PreSteps));
steps = Data.PreSteps;
}
else if (stage == ActionRunStage.Post)
{
ArgUtil.NotNull(Data.PostSteps, nameof(Data.PostSteps));
steps = new List<Pipelines.ActionStep>();
// Only register post steps for steps that actually ran
foreach (var step in Data.PostSteps.ToList())
{
if (ExecutionContext.Root.EmbeddedStepsWithPostRegistered.ContainsKey(step.Id))
{
step.Condition = ExecutionContext.Root.EmbeddedStepsWithPostRegistered[step.Id];
steps.Add(step);
}
else
{
Trace.Info($"Skipping executing post step id: {step.Id}, name: ${step.DisplayName}");
}
}
}
else
{
ArgUtil.NotNull(Data.Steps, nameof(Data.Steps));
steps = Data.Steps;
}
// Add Telemetry to JobContext to send with JobCompleteMessage
if (stage == ActionRunStage.Main)
{
var hasRunsStep = false;
var hasUsesStep = false;
foreach (var step in steps)
{
if (step.Reference.Type == Pipelines.ActionSourceType.Script)
{
hasRunsStep = true;
}
else
{
hasUsesStep = true;
}
}
var pathReference = Action as Pipelines.RepositoryPathReference;
var telemetry = new ActionsStepTelemetry {
Ref = GetActionRef(),
HasPreStep = Data.HasPre,
HasPostStep = Data.HasPost,
IsEmbedded = ExecutionContext.IsEmbedded,
Type = "composite",
HasRunsStep = hasRunsStep,
HasUsesStep = hasUsesStep,
StepCount = steps.Count
};
ExecutionContext.Root.ActionsStepsTelemetry.Add(telemetry);
}
try
{
// Inputs of the composite step
@@ -106,7 +41,7 @@ namespace GitHub.Runner.Worker.Handlers
inputsData[i.Key] = new StringContextData(i.Value);
}
// Temporary hack until after 3.2. After 3.2 the server will never send an empty
// Temporary hack until after M271-ish. After M271-ish the server will never send an empty
// context name. Generated context names start with "__"
var childScopeName = ExecutionContext.GetFullyQualifiedContextName();
if (string.IsNullOrEmpty(childScopeName))
@@ -116,44 +51,15 @@ namespace GitHub.Runner.Worker.Handlers
// Create embedded steps
var embeddedSteps = new List<IStep>();
// If we need to setup containers beforehand, do it
// only relevant for local composite actions that need to JIT download/setup containers
if (LocalActionContainerSetupSteps != null && LocalActionContainerSetupSteps.Count > 0)
foreach (Pipelines.ActionStep stepData in Data.Steps)
{
foreach (var step in LocalActionContainerSetupSteps)
{
ArgUtil.NotNull(step, step.DisplayName);
var stepId = $"__{Guid.NewGuid()}";
step.ExecutionContext = ExecutionContext.CreateEmbeddedChild(childScopeName, stepId, Guid.NewGuid(), stage);
embeddedSteps.Add(step);
}
}
foreach (Pipelines.ActionStep stepData in steps)
{
// Compute child sibling scope names for post steps
// We need to use the main's scope to keep step context correct, makes inputs flow correctly
string siblingScopeName = null;
if (!String.IsNullOrEmpty(ExecutionContext.SiblingScopeName) && stage == ActionRunStage.Post)
{
siblingScopeName = $"{ExecutionContext.SiblingScopeName}.{stepData.ContextName}";
}
var step = HostContext.CreateService<IActionRunner>();
step.Action = stepData;
step.Stage = stage;
step.Condition = stepData.Condition;
ExecutionContext.Root.EmbeddedIntraActionState.TryGetValue(step.Action.Id, out var intraActionState);
step.ExecutionContext = ExecutionContext.CreateEmbeddedChild(childScopeName, stepData.ContextName, step.Action.Id, stage, intraActionState: intraActionState, siblingScopeName: siblingScopeName);
step.ExecutionContext = ExecutionContext.CreateEmbeddedChild(childScopeName, stepData.ContextName);
step.ExecutionContext.ExpressionValues["inputs"] = inputsData;
if (!String.IsNullOrEmpty(ExecutionContext.SiblingScopeName))
{
step.ExecutionContext.ExpressionValues["steps"] = ExecutionContext.Global.StepsContext.GetScope(ExecutionContext.SiblingScopeName);
}
else
{
step.ExecutionContext.ExpressionValues["steps"] = ExecutionContext.Global.StepsContext.GetScope(childScopeName);
}
step.ExecutionContext.ExpressionValues["steps"] = ExecutionContext.Global.StepsContext.GetScope(childScopeName);
// Shallow copy github context
var gitHubContext = step.ExecutionContext.ExpressionValues["github"] as GitHubContext;
@@ -168,12 +74,13 @@ namespace GitHub.Runner.Worker.Handlers
}
// Run embedded steps
await RunStepsAsync(embeddedSteps, stage);
await RunStepsAsync(embeddedSteps);
// Set outputs
ExecutionContext.ExpressionValues["inputs"] = inputsData;
ExecutionContext.ExpressionValues["steps"] = ExecutionContext.Global.StepsContext.GetScope(childScopeName);
ProcessOutputs();
ExecutionContext.Global.StepsContext.ClearScope(childScopeName);
}
catch (Exception ex)
{
@@ -227,7 +134,7 @@ namespace GitHub.Runner.Worker.Handlers
}
}
private async Task RunStepsAsync(List<IStep> embeddedSteps, ActionRunStage stage)
private async Task RunStepsAsync(List<IStep> embeddedSteps)
{
ArgUtil.NotNull(embeddedSteps, nameof(embeddedSteps));
@@ -235,17 +142,6 @@ namespace GitHub.Runner.Worker.Handlers
{
Trace.Info($"Processing embedded step: DisplayName='{step.DisplayName}'");
// Add Expression Functions
step.ExecutionContext.ExpressionFunctions.Add(new FunctionInfo<HashFilesFunction>(PipelineTemplateConstants.HashFiles, 1, byte.MaxValue));
step.ExecutionContext.ExpressionFunctions.Add(new FunctionInfo<AlwaysFunction>(PipelineTemplateConstants.Always, 0, 0));
step.ExecutionContext.ExpressionFunctions.Add(new FunctionInfo<CancelledFunction>(PipelineTemplateConstants.Cancelled, 0, 0));
step.ExecutionContext.ExpressionFunctions.Add(new FunctionInfo<FailureFunction>(PipelineTemplateConstants.Failure, 0, 0));
step.ExecutionContext.ExpressionFunctions.Add(new FunctionInfo<SuccessFunction>(PipelineTemplateConstants.Success, 0, 0));
// Set action_status to the success of the current composite action
var actionResult = ExecutionContext.Result?.ToActionResult() ?? ActionResult.Success;
step.ExecutionContext.SetGitHubContext("action_status", actionResult.ToString());
// Initialize env context
Trace.Info("Initialize Env context for embedded step");
#if OS_WINDOWS
@@ -275,17 +171,16 @@ namespace GitHub.Runner.Worker.Handlers
}
}
var actionStep = step as IActionRunner;
try
{
if (step is IActionRunner actionStep)
// Evaluate and merge embedded-step env
var templateEvaluator = step.ExecutionContext.ToPipelineTemplateEvaluator();
var actionEnvironment = templateEvaluator.EvaluateStepEnvironment(actionStep.Action.Environment, step.ExecutionContext.ExpressionValues, step.ExecutionContext.ExpressionFunctions, Common.Util.VarUtil.EnvironmentVariableKeyComparer);
foreach (var env in actionEnvironment)
{
// Evaluate and merge embedded-step env
var templateEvaluator = step.ExecutionContext.ToPipelineTemplateEvaluator();
var actionEnvironment = templateEvaluator.EvaluateStepEnvironment(actionStep.Action.Environment, step.ExecutionContext.ExpressionValues, step.ExecutionContext.ExpressionFunctions, Common.Util.VarUtil.EnvironmentVariableKeyComparer);
foreach (var env in actionEnvironment)
{
envContext[env.Key] = new StringContextData(env.Value ?? string.Empty);
}
envContext[env.Key] = new StringContextData(env.Value ?? string.Empty);
}
}
catch (Exception ex)
@@ -296,119 +191,13 @@ namespace GitHub.Runner.Worker.Handlers
step.ExecutionContext.Complete(TaskResult.Failed);
}
// Register Callback
CancellationTokenRegistration? jobCancelRegister = null;
try
{
// Register job cancellation call back only if job cancellation token not been fire before each step run
if (!ExecutionContext.Root.CancellationToken.IsCancellationRequested)
{
// Test the condition again. The job was canceled after the condition was originally evaluated.
jobCancelRegister = ExecutionContext.Root.CancellationToken.Register(() =>
{
// Mark job as cancelled
ExecutionContext.Root.Result = TaskResult.Canceled;
ExecutionContext.Root.JobContext.Status = ExecutionContext.Root.Result?.ToActionResult();
step.ExecutionContext.Debug($"Re-evaluate condition on job cancellation for step: '{step.DisplayName}'.");
var conditionReTestTraceWriter = new ConditionTraceWriter(Trace, null); // host tracing only
var conditionReTestResult = false;
if (HostContext.RunnerShutdownToken.IsCancellationRequested)
{
step.ExecutionContext.Debug($"Skip Re-evaluate condition on runner shutdown.");
}
else
{
try
{
var templateEvaluator = step.ExecutionContext.ToPipelineTemplateEvaluator(conditionReTestTraceWriter);
var condition = new BasicExpressionToken(null, null, null, step.Condition);
conditionReTestResult = templateEvaluator.EvaluateStepIf(condition, step.ExecutionContext.ExpressionValues, step.ExecutionContext.ExpressionFunctions, step.ExecutionContext.ToExpressionState());
}
catch (Exception ex)
{
// Cancel the step since we get exception while re-evaluate step condition
Trace.Info("Caught exception from expression when re-test condition on job cancellation.");
step.ExecutionContext.Error(ex);
}
}
if (!conditionReTestResult)
{
// Cancel the step
Trace.Info("Cancel current running step.");
step.ExecutionContext.CancelToken();
}
});
}
else
{
if (ExecutionContext.Root.Result != TaskResult.Canceled)
{
// Mark job as cancelled
ExecutionContext.Root.Result = TaskResult.Canceled;
ExecutionContext.Root.JobContext.Status = ExecutionContext.Root.Result?.ToActionResult();
}
}
// Evaluate condition
step.ExecutionContext.Debug($"Evaluating condition for step: '{step.DisplayName}'");
var conditionTraceWriter = new ConditionTraceWriter(Trace, step.ExecutionContext);
var conditionResult = false;
var conditionEvaluateError = default(Exception);
if (HostContext.RunnerShutdownToken.IsCancellationRequested)
{
step.ExecutionContext.Debug($"Skip evaluate condition on runner shutdown.");
}
else
{
try
{
var templateEvaluator = step.ExecutionContext.ToPipelineTemplateEvaluator(conditionTraceWriter);
var condition = new BasicExpressionToken(null, null, null, step.Condition);
conditionResult = templateEvaluator.EvaluateStepIf(condition, step.ExecutionContext.ExpressionValues, step.ExecutionContext.ExpressionFunctions, step.ExecutionContext.ToExpressionState());
}
catch (Exception ex)
{
Trace.Info("Caught exception from expression.");
Trace.Error(ex);
conditionEvaluateError = ex;
}
}
if (!conditionResult && conditionEvaluateError == null)
{
// Condition is false
Trace.Info("Skipping step due to condition evaluation.");
step.ExecutionContext.Result = TaskResult.Skipped;
continue;
}
else if (conditionEvaluateError != null)
{
// Condition error
step.ExecutionContext.Error(conditionEvaluateError);
step.ExecutionContext.Result = TaskResult.Failed;
ExecutionContext.Result = TaskResult.Failed;
break;
}
else
{
await RunStepAsync(step);
}
}
finally
{
if (jobCancelRegister != null)
{
jobCancelRegister?.Dispose();
jobCancelRegister = null;
}
}
await RunStepAsync(step);
// Check failed or canceled
if (step.ExecutionContext.Result == TaskResult.Failed || step.ExecutionContext.Result == TaskResult.Canceled)
{
Trace.Info($"Update job result with current composite step result '{step.ExecutionContext.Result}'.");
ExecutionContext.Result = TaskResultUtil.MergeTaskResults(ExecutionContext.Result, step.ExecutionContext.Result.Value);
ExecutionContext.Result = step.ExecutionContext.Result;
break;
}
}
}

View File

@@ -1,4 +1,4 @@
using System.Collections.Generic;
using System.Collections.Generic;
using System.IO;
using System.Threading.Tasks;
using System;
@@ -69,20 +69,6 @@ namespace GitHub.Runner.Worker.Handlers
Data.Image = imageName;
}
string type = Action.Type == Pipelines.ActionSourceType.Repository ? "Dockerfile" : "DockerHub";
// Add Telemetry to JobContext to send with JobCompleteMessage
if (stage == ActionRunStage.Main)
{
var telemetry = new ActionsStepTelemetry {
Ref = GetActionRef(),
HasPreStep = Data.HasPre,
HasPostStep = Data.HasPost,
IsEmbedded = ExecutionContext.IsEmbedded,
Type = type
};
ExecutionContext.Root.ActionsStepsTelemetry.Add(telemetry);
}
// run container
var container = new ContainerInfo(HostContext)
{
@@ -214,11 +200,6 @@ namespace GitHub.Runner.Worker.Handlers
{
Environment["ACTIONS_CACHE_URL"] = cacheUrl;
}
if (systemConnection.Data.TryGetValue("GenerateIdTokenUrl", out var generateIdTokenUrl) && !string.IsNullOrEmpty(generateIdTokenUrl))
{
Environment["ACTIONS_ID_TOKEN_REQUEST_URL"] = generateIdTokenUrl;
Environment["ACTIONS_ID_TOKEN_REQUEST_TOKEN"] = systemConnection.Authorization.Parameters[EndpointAuthorizationParameters.AccessToken];
}
foreach (var variable in this.Environment)
{

View File

@@ -20,7 +20,6 @@ namespace GitHub.Runner.Worker.Handlers
IStepHost StepHost { get; set; }
Dictionary<string, string> Inputs { get; set; }
string ActionDirectory { get; set; }
List<JobExtensionRunner> LocalActionContainerSetupSteps { get; set; }
Task RunAsync(ActionRunStage stage);
void PrintActionDetails(ActionRunStage stage);
}
@@ -42,44 +41,9 @@ namespace GitHub.Runner.Worker.Handlers
public IStepHost StepHost { get; set; }
public Dictionary<string, string> Inputs { get; set; }
public string ActionDirectory { get; set; }
public List<JobExtensionRunner> LocalActionContainerSetupSteps { get; set; }
public virtual string GetActionRef()
{
if (Action.Type == Pipelines.ActionSourceType.ContainerRegistry)
{
var registryAction = Action as Pipelines.ContainerRegistryReference;
return registryAction.Image;
}
else if (Action.Type == Pipelines.ActionSourceType.Repository)
{
var repoAction = Action as Pipelines.RepositoryPathReference;
if (string.Equals(repoAction.RepositoryType, Pipelines.PipelineConstants.SelfAlias, StringComparison.OrdinalIgnoreCase))
{
return repoAction.Path;
}
else
{
if (string.IsNullOrEmpty(repoAction.Path))
{
return $"{repoAction.Name}@{repoAction.Ref}";
}
else
{
return $"{repoAction.Name}/{repoAction.Path}@{repoAction.Ref}";
}
}
}
else
{
// this should never happen
Trace.Error($"Can't generate ref for {Action.Type.ToString()}");
}
return "";
}
public virtual void PrintActionDetails(ActionRunStage stage)
{
if (stage == ActionRunStage.Post)
{
ExecutionContext.Output($"Post job cleanup.");

View File

@@ -19,8 +19,7 @@ namespace GitHub.Runner.Worker.Handlers
Dictionary<string, string> inputs,
Dictionary<string, string> environment,
Variables runtimeVariables,
string actionDirectory,
List<JobExtensionRunner> localActionContainerSetupSteps);
string actionDirectory);
}
public sealed class HandlerFactory : RunnerService, IHandlerFactory
@@ -33,8 +32,7 @@ namespace GitHub.Runner.Worker.Handlers
Dictionary<string, string> inputs,
Dictionary<string, string> environment,
Variables runtimeVariables,
string actionDirectory,
List<JobExtensionRunner> localActionContainerSetupSteps)
string actionDirectory)
{
// Validate args.
Trace.Entering();
@@ -86,7 +84,6 @@ namespace GitHub.Runner.Worker.Handlers
handler.StepHost = stepHost;
handler.Inputs = inputs;
handler.ActionDirectory = actionDirectory;
handler.LocalActionContainerSetupSteps = localActionContainerSetupSteps;
return handler;
}
}

View File

@@ -53,11 +53,6 @@ namespace GitHub.Runner.Worker.Handlers
{
Environment["ACTIONS_CACHE_URL"] = cacheUrl;
}
if (systemConnection.Data.TryGetValue("GenerateIdTokenUrl", out var generateIdTokenUrl) && !string.IsNullOrEmpty(generateIdTokenUrl))
{
Environment["ACTIONS_ID_TOKEN_REQUEST_URL"] = generateIdTokenUrl;
Environment["ACTIONS_ID_TOKEN_REQUEST_TOKEN"] = systemConnection.Authorization.Parameters[EndpointAuthorizationParameters.AccessToken];
}
// Resolve the target script.
string target = null;
@@ -74,20 +69,6 @@ namespace GitHub.Runner.Worker.Handlers
target = Data.Post;
}
// Add Telemetry to JobContext to send with JobCompleteMessage
if (stage == ActionRunStage.Main)
{
var telemetry = new ActionsStepTelemetry
{
Ref = GetActionRef(),
HasPreStep = Data.HasPre,
HasPostStep = Data.HasPost,
IsEmbedded = ExecutionContext.IsEmbedded,
Type = Data.NodeVersion
};
ExecutionContext.Root.ActionsStepsTelemetry.Add(telemetry);
}
ArgUtil.NotNullOrEmpty(target, nameof(target));
target = Path.Combine(ActionDirectory, target);
ArgUtil.File(target, nameof(target));
@@ -99,7 +80,7 @@ namespace GitHub.Runner.Worker.Handlers
workingDirectory = HostContext.GetDirectory(WellKnownDirectory.Work);
}
var nodeRuntimeVersion = await StepHost.DetermineNodeRuntimeVersion(ExecutionContext, Data.NodeVersion);
var nodeRuntimeVersion = await StepHost.DetermineNodeRuntimeVersion(ExecutionContext);
string file = Path.Combine(HostContext.GetDirectory(WellKnownDirectory.Externals), nodeRuntimeVersion, "bin", $"node{IOUtil.ExeExtension}");
// Format the arguments passed to node.

View File

@@ -210,10 +210,6 @@ namespace GitHub.Runner.Worker.Handlers
{
issueType = DTWebApi.IssueType.Warning;
}
else if (string.Equals(match.Severity, "notice", StringComparison.OrdinalIgnoreCase))
{
issueType = DTWebApi.IssueType.Notice;
}
else
{
_executionContext.Debug($"Skipped logging an issue for the matched line because the severity '{match.Severity}' is not supported.");

View File

@@ -23,6 +23,18 @@ namespace GitHub.Runner.Worker.Handlers
public override void PrintActionDetails(ActionRunStage stage)
{
// We don't want to display the internal workings if composite (similar/equivalent information can be found in debug)
void writeDetails(string message)
{
if (ExecutionContext.IsEmbedded)
{
ExecutionContext.Debug(message);
}
else
{
ExecutionContext.Output(message);
}
}
if (stage == ActionRunStage.Post)
{
@@ -40,7 +52,7 @@ namespace GitHub.Runner.Worker.Handlers
firstLine = firstLine.Substring(0, firstNewLine);
}
ExecutionContext.Output($"##[group]Run {firstLine}");
writeDetails(ExecutionContext.IsEmbedded ? $"Run {firstLine}" : $"##[group]Run {firstLine}");
}
else
{
@@ -51,7 +63,7 @@ namespace GitHub.Runner.Worker.Handlers
foreach (var line in multiLines)
{
// Bright Cyan color
ExecutionContext.Output($"\x1b[36;1m{line}\x1b[0m");
writeDetails($"\x1b[36;1m{line}\x1b[0m");
}
string argFormat;
@@ -110,23 +122,23 @@ namespace GitHub.Runner.Worker.Handlers
if (!string.IsNullOrEmpty(shellCommandPath))
{
ExecutionContext.Output($"shell: {shellCommandPath} {argFormat}");
writeDetails($"shell: {shellCommandPath} {argFormat}");
}
else
{
ExecutionContext.Output($"shell: {shellCommand} {argFormat}");
writeDetails($"shell: {shellCommand} {argFormat}");
}
if (this.Environment?.Count > 0)
{
ExecutionContext.Output("env:");
writeDetails("env:");
foreach (var env in this.Environment)
{
ExecutionContext.Output($" {env.Key}: {env.Value}");
writeDetails($" {env.Key}: {env.Value}");
}
}
ExecutionContext.Output("##[endgroup]");
writeDetails(ExecutionContext.IsEmbedded ? "" : "##[endgroup]");
}
public async Task RunAsync(ActionRunStage stage)
@@ -144,17 +156,6 @@ namespace GitHub.Runner.Worker.Handlers
var githubContext = ExecutionContext.ExpressionValues["github"] as GitHubContext;
ArgUtil.NotNull(githubContext, nameof(githubContext));
// Add Telemetry to JobContext to send with JobCompleteMessage
if (stage == ActionRunStage.Main)
{
var telemetry = new ActionsStepTelemetry
{
IsEmbedded = ExecutionContext.IsEmbedded,
Type = "run",
};
ExecutionContext.Root.ActionsStepsTelemetry.Add(telemetry);
}
var tempDirectory = HostContext.GetDirectory(WellKnownDirectory.Temp);
Inputs.TryGetValue("script", out var contents);
@@ -277,13 +278,6 @@ namespace GitHub.Runner.Worker.Handlers
fileName = node12;
}
#endif
var systemConnection = ExecutionContext.Global.Endpoints.Single(x => string.Equals(x.Name, WellKnownServiceEndpointNames.SystemVssConnection, StringComparison.OrdinalIgnoreCase));
if (systemConnection.Data.TryGetValue("GenerateIdTokenUrl", out var generateIdTokenUrl) && !string.IsNullOrEmpty(generateIdTokenUrl))
{
Environment["ACTIONS_ID_TOKEN_REQUEST_URL"] = generateIdTokenUrl;
Environment["ACTIONS_ID_TOKEN_REQUEST_TOKEN"] = systemConnection.Authorization.Parameters[EndpointAuthorizationParameters.AccessToken];
}
ExecutionContext.Debug($"{fileName} {arguments}");
using (var stdoutManager = new OutputManager(ExecutionContext, ActionCommandManager))

View File

@@ -23,7 +23,7 @@ namespace GitHub.Runner.Worker.Handlers
string ResolvePathForStepHost(string path);
Task<string> DetermineNodeRuntimeVersion(IExecutionContext executionContext, string preferredVersion);
Task<string> DetermineNodeRuntimeVersion(IExecutionContext executionContext);
Task<int> ExecuteAsync(string workingDirectory,
string fileName,
@@ -58,9 +58,9 @@ namespace GitHub.Runner.Worker.Handlers
return path;
}
public Task<string> DetermineNodeRuntimeVersion(IExecutionContext executionContext, string preferredVersion)
public Task<string> DetermineNodeRuntimeVersion(IExecutionContext executionContext)
{
return Task.FromResult<string>(preferredVersion);
return Task.FromResult<string>("node12");
}
public async Task<int> ExecuteAsync(string workingDirectory,
@@ -123,7 +123,7 @@ namespace GitHub.Runner.Worker.Handlers
}
}
public async Task<string> DetermineNodeRuntimeVersion(IExecutionContext executionContext, string preferredVersion)
public async Task<string> DetermineNodeRuntimeVersion(IExecutionContext executionContext)
{
// Best effort to determine a compatible node runtime
// There may be more variation in which libraries are linked than just musl/glibc,
@@ -148,14 +148,14 @@ namespace GitHub.Runner.Worker.Handlers
var msg = $"JavaScript Actions in Alpine containers are only supported on x64 Linux runners. Detected {os} {arch}";
throw new NotSupportedException(msg);
}
nodeExternal = $"{preferredVersion}_alpine";
nodeExternal = "node12_alpine";
executionContext.Debug($"Container distribution is alpine. Running JavaScript Action with external tool: {nodeExternal}");
return nodeExternal;
}
}
}
// Optimistically use the default
nodeExternal = preferredVersion;
nodeExternal = "node12";
executionContext.Debug($"Running JavaScript Action with default external tool: {nodeExternal}");
return nodeExternal;
}
@@ -188,7 +188,7 @@ namespace GitHub.Runner.Worker.Handlers
{
// e.g. -e MY_SECRET maps the value into the exec'ed process without exposing
// the value directly in the command
dockerCommandArgs.Add(DockerUtil.CreateEscapedOption("-e", env.Key));
dockerCommandArgs.Add($"-e {env.Key}");
}
if (!string.IsNullOrEmpty(PrependPath))
{

View File

@@ -350,7 +350,6 @@ namespace GitHub.Runner.Worker
case "":
case "ERROR":
case "WARNING":
case "NOTICE":
break;
default:
throw new ArgumentException($"Matcher '{_owner}' contains unexpected default severity '{_severity}'");

View File

@@ -55,7 +55,7 @@ namespace GitHub.Runner.Worker
ArgUtil.NotNull(message, nameof(message));
// Create a new timeline record for 'Set up job'
IExecutionContext context = jobContext.CreateChild(Guid.NewGuid(), "Set up job", $"{nameof(JobExtension)}_Init", null, null, ActionRunStage.Pre);
IExecutionContext context = jobContext.CreateChild(Guid.NewGuid(), "Set up job", $"{nameof(JobExtension)}_Init", null, null);
List<IStep> preJobSteps = new List<IStep>();
List<IStep> jobSteps = new List<IStep>();
@@ -142,12 +142,6 @@ namespace GitHub.Runner.Worker
Trace.Error(ex);
}
var secretSource = context.GetGitHubContext("secret_source");
if (!string.IsNullOrEmpty(secretSource))
{
context.Output($"Secret source: {secretSource}");
}
var repoFullName = context.GetGitHubContext("repository");
ArgUtil.NotNull(repoFullName, nameof(repoFullName));
context.Debug($"Primary repository: {repoFullName}");
@@ -312,13 +306,13 @@ namespace GitHub.Runner.Worker
JobExtensionRunner extensionStep = step as JobExtensionRunner;
ArgUtil.NotNull(extensionStep, extensionStep.DisplayName);
Guid stepId = Guid.NewGuid();
extensionStep.ExecutionContext = jobContext.CreateChild(stepId, extensionStep.DisplayName, null, null, stepId.ToString("N"), ActionRunStage.Pre);
extensionStep.ExecutionContext = jobContext.CreateChild(stepId, extensionStep.DisplayName, null, null, stepId.ToString("N"));
}
else if (step is IActionRunner actionStep)
{
ArgUtil.NotNull(actionStep, step.DisplayName);
Guid stepId = Guid.NewGuid();
actionStep.ExecutionContext = jobContext.CreateChild(stepId, actionStep.DisplayName, stepId.ToString("N"), null, null, ActionRunStage.Pre, intraActionStates[actionStep.Action.Id]);
actionStep.ExecutionContext = jobContext.CreateChild(stepId, actionStep.DisplayName, stepId.ToString("N"), null, null, intraActionStates[actionStep.Action.Id]);
}
}
@@ -329,7 +323,7 @@ namespace GitHub.Runner.Worker
{
ArgUtil.NotNull(actionStep, step.DisplayName);
intraActionStates.TryGetValue(actionStep.Action.Id, out var intraActionState);
actionStep.ExecutionContext = jobContext.CreateChild(actionStep.Action.Id, actionStep.DisplayName, actionStep.Action.Name, null, actionStep.Action.ContextName, ActionRunStage.Main, intraActionState);
actionStep.ExecutionContext = jobContext.CreateChild(actionStep.Action.Id, actionStep.DisplayName, actionStep.Action.Name, null, actionStep.Action.ContextName, intraActionState);
}
}
@@ -400,7 +394,7 @@ namespace GitHub.Runner.Worker
ArgUtil.NotNull(jobContext, nameof(jobContext));
// create a new timeline record node for 'Finalize job'
IExecutionContext context = jobContext.CreateChild(Guid.NewGuid(), "Complete job", $"{nameof(JobExtension)}_Final", null, null, ActionRunStage.Post);
IExecutionContext context = jobContext.CreateChild(Guid.NewGuid(), "Complete job", $"{nameof(JobExtension)}_Final", null, null);
using (var register = jobContext.CancellationToken.Register(() => { context.CancelToken(); }))
{
try

View File

@@ -7,7 +7,6 @@ using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using System.Net.Http;
@@ -106,10 +105,6 @@ namespace GitHub.Runner.Worker
}
jobContext.SetRunnerContext("os", VarUtil.OS);
jobContext.SetRunnerContext("arch", VarUtil.OSArchitecture);
var runnerSettings = HostContext.GetService<IConfigurationStore>().GetSettings();
jobContext.SetRunnerContext("name", runnerSettings.AgentName);
string toolsDirectory = HostContext.GetDirectory(WellKnownDirectory.Tools);
Directory.CreateDirectory(toolsDirectory);
@@ -150,16 +145,6 @@ namespace GitHub.Runner.Worker
Trace.Verbose($"Job steps: '{string.Join(", ", jobSteps.Select(x => x.DisplayName))}'");
HostContext.WritePerfCounter($"WorkerJobInitialized_{message.RequestId.ToString()}");
if (systemConnection.Data.TryGetValue("GenerateIdTokenUrl", out var generateIdTokenUrl) &&
!string.IsNullOrEmpty(generateIdTokenUrl))
{
// Server won't issue ID_TOKEN for non-inprogress job.
// If the job is trying to use OIDC feature, we want the job to be marked as in-progress before running any customer's steps as much as we can.
// Timeline record update background process runs every 500ms, so delay 1000ms is enough for most of the cases
Trace.Info($"Waiting for job to be marked as started.");
await Task.WhenAny(_jobServerQueue.JobRecordUpdated.Task, Task.Delay(1000));
}
// Run all job steps
Trace.Info("Run all job steps.");
var stepsRunner = HostContext.GetService<IStepsRunner>();
@@ -230,15 +215,8 @@ namespace GitHub.Runner.Worker
return result;
}
// Load any upgrade telemetry
LoadFromTelemetryFile(jobContext.JobTelemetry);
// Make sure we don't submit secrets as telemetry
MaskTelemetrySecrets(jobContext.JobTelemetry);
Trace.Info("Raising job completed event.");
var jobCompletedEvent = new JobCompletedEvent(message.RequestId, message.JobId, result, jobContext.JobOutputs, jobContext.ActionsEnvironment, jobContext.ActionsStepsTelemetry, jobContext.JobTelemetry);
var jobCompletedEvent = new JobCompletedEvent(message.RequestId, message.JobId, result, jobContext.JobOutputs, jobContext.ActionsEnvironment);
var completeJobRetryLimit = 5;
var exceptions = new List<Exception>();
@@ -282,38 +260,6 @@ namespace GitHub.Runner.Worker
throw new AggregateException(exceptions);
}
private void MaskTelemetrySecrets(List<JobTelemetry> jobTelemetry)
{
foreach (var telemetryItem in jobTelemetry)
{
telemetryItem.Message = HostContext.SecretMasker.MaskSecrets(telemetryItem.Message);
}
}
private void LoadFromTelemetryFile(List<JobTelemetry> jobTelemetry)
{
try
{
var telemetryFilePath = HostContext.GetConfigFile(WellKnownConfigFile.Telemetry);
if (File.Exists(telemetryFilePath))
{
var telemetryData = File.ReadAllText(telemetryFilePath, Encoding.UTF8);
var telemetry = new JobTelemetry
{
Message = $"Runner File Telemetry:\n{telemetryData}",
Type = JobTelemetryType.General
};
jobTelemetry.Add(telemetry);
IOUtil.DeleteFile(telemetryFilePath);
}
}
catch (Exception e)
{
Trace.Error("Error when trying to load telemetry from telemetry file");
Trace.Error(e);
}
}
private async Task ShutdownQueue(bool throwOnFailure)
{
if (_jobServerQueue != null)

View File

@@ -5,6 +5,7 @@
<OutputType>Exe</OutputType>
<RuntimeIdentifiers>win-x64;win-x86;linux-x64;linux-arm64;linux-arm;osx-x64</RuntimeIdentifiers>
<TargetLatestRuntimePatch>true</TargetLatestRuntimePatch>
<AssetTargetFallback>portable-net45+win8</AssetTargetFallback>
<NoWarn>NU1701;NU1603</NoWarn>
<Version>$(Version)</Version>
<TieredCompilationQuickJit>true</TieredCompilationQuickJit>

View File

@@ -354,5 +354,43 @@ namespace GitHub.Runner.Worker
executionContext.Complete(result, resultCode: resultCode);
}
private sealed class ConditionTraceWriter : ObjectTemplating::ITraceWriter
{
private readonly IExecutionContext _executionContext;
private readonly Tracing _trace;
private readonly StringBuilder _traceBuilder = new StringBuilder();
public string Trace => _traceBuilder.ToString();
public ConditionTraceWriter(Tracing trace, IExecutionContext executionContext)
{
ArgUtil.NotNull(trace, nameof(trace));
_trace = trace;
_executionContext = executionContext;
}
public void Error(string format, params Object[] args)
{
var message = StringUtil.Format(format, args);
_trace.Error(message);
_executionContext?.Debug(message);
}
public void Info(string format, params Object[] args)
{
var message = StringUtil.Format(format, args);
_trace.Info(message);
_executionContext?.Debug(message);
_traceBuilder.AppendLine(message);
}
public void Verbose(string format, params Object[] args)
{
var message = StringUtil.Format(format, args);
_trace.Verbose(message);
_executionContext?.Debug(message);
}
}
}
}

View File

@@ -63,6 +63,7 @@ namespace GitHub.Runner.Worker
Trace.Info("Message received.");
ArgUtil.Equal(MessageType.NewJobRequest, channelMessage.MessageType, nameof(channelMessage.MessageType));
ArgUtil.NotNullOrEmpty(channelMessage.Body, nameof(channelMessage.Body));
Trace.Info(channelMessage.Body);
var jobMessage = StringUtil.ConvertFromJson<Pipelines.AgentJobRequestMessage>(channelMessage.Body);
ArgUtil.NotNull(jobMessage, nameof(jobMessage));
HostContext.WritePerfCounter($"WorkerJobMessageReceived_{jobMessage.RequestId.ToString()}");

View File

@@ -46,7 +46,7 @@
"runs": {
"one-of": [
"container-runs",
"node-runs",
"node12-runs",
"plugin-runs",
"composite-runs"
]
@@ -80,7 +80,7 @@
"loose-value-type": "string"
}
},
"node-runs": {
"node12-runs": {
"mapping": {
"properties": {
"using": "non-empty-string",
@@ -113,17 +113,10 @@
}
},
"composite-step": {
"one-of": [
"run-step",
"uses-step"
]
},
"run-step": {
"mapping": {
"properties": {
"name": "string-steps-context",
"id": "non-empty-string",
"if": "step-if",
"run": {
"type": "string-steps-context",
"required": true
@@ -137,21 +130,6 @@
}
}
},
"uses-step": {
"mapping": {
"properties": {
"name": "string-steps-context",
"id": "non-empty-string",
"if": "step-if",
"uses": {
"type": "non-empty-string",
"required": true
},
"with": "step-with",
"env": "step-env"
}
}
},
"container-runs-context": {
"context": [
"inputs"
@@ -217,41 +195,6 @@
"loose-key-type": "non-empty-string",
"loose-value-type": "string"
}
},
"step-if": {
"context": [
"github",
"inputs",
"strategy",
"matrix",
"steps",
"job",
"runner",
"env",
"always(0,0)",
"failure(0,0)",
"cancelled(0,0)",
"success(0,0)",
"hashFiles(1,255)"
],
"string": {}
},
"step-with": {
"context": [
"github",
"inputs",
"strategy",
"matrix",
"steps",
"job",
"runner",
"env",
"hashFiles(1,255)"
],
"mapping": {
"loose-key-type": "non-empty-string",
"loose-value-type": "string"
}
}
}
}
}

View File

@@ -768,7 +768,6 @@ namespace GitHub.DistributedTask.WebApi
/// <param name="poolId"></param>
/// <param name="agentId"></param>
/// <param name="currentState"></param>
/// <param name="updateTrace"></param>
/// <param name="userState"></param>
/// <param name="cancellationToken">The cancellation token to cancel operation.</param>
[EditorBrowsable(EditorBrowsableState.Never)]
@@ -776,7 +775,6 @@ namespace GitHub.DistributedTask.WebApi
int poolId,
int agentId,
string currentState,
string updateTrace,
object userState = null,
CancellationToken cancellationToken = default)
{
@@ -786,7 +784,6 @@ namespace GitHub.DistributedTask.WebApi
List<KeyValuePair<string, string>> queryParams = new List<KeyValuePair<string, string>>();
queryParams.Add("currentState", currentState);
queryParams.Add("updateTrace", updateTrace);
return SendAsync<TaskAgent>(
httpMethod,
@@ -797,5 +794,65 @@ namespace GitHub.DistributedTask.WebApi
userState: userState,
cancellationToken: cancellationToken);
}
/// <summary>
/// [Preview API]
/// </summary>
/// <param name="poolId"></param>
/// <param name="agentId"></param>
/// <param name="userState"></param>
/// <param name="cancellationToken">The cancellation token to cancel operation.</param>
public Task<String> GetAgentAuthUrlAsync(
int poolId,
int agentId,
object userState = null,
CancellationToken cancellationToken = default)
{
HttpMethod httpMethod = new HttpMethod("GET");
Guid locationId = new Guid("a82a119c-1e46-44b6-8d75-c82a79cf975b");
object routeValues = new { poolId = poolId, agentId = agentId };
return SendAsync<String>(
httpMethod,
locationId,
routeValues: routeValues,
version: new ApiResourceVersion(6.0, 1),
userState: userState,
cancellationToken: cancellationToken);
}
/// <summary>
/// [Preview API]
/// </summary>
/// <param name="poolId"></param>
/// <param name="agentId"></param>
/// <param name="error"></param>
/// <param name="userState"></param>
/// <param name="cancellationToken">The cancellation token to cancel operation.</param>
[EditorBrowsable(EditorBrowsableState.Never)]
public virtual async Task ReportAgentAuthUrlMigrationErrorAsync(
int poolId,
int agentId,
string error,
object userState = null,
CancellationToken cancellationToken = default)
{
HttpMethod httpMethod = new HttpMethod("POST");
Guid locationId = new Guid("a82a119c-1e46-44b6-8d75-c82a79cf975b");
object routeValues = new { poolId = poolId, agentId = agentId };
HttpContent content = new ObjectContent<string>(error, new VssJsonMediaTypeFormatter(true));
using (HttpResponseMessage response = await SendAsync(
httpMethod,
locationId,
routeValues: routeValues,
version: new ApiResourceVersion(6.0, 1),
userState: userState,
cancellationToken: cancellationToken,
content: content).ConfigureAwait(false))
{
return;
}
}
}
}

View File

@@ -2,7 +2,6 @@
using System.ComponentModel;
using System.Security;
using System.Text;
using System.Text.RegularExpressions;
using Newtonsoft.Json;
namespace GitHub.DistributedTask.Logging
@@ -81,65 +80,6 @@ namespace GitHub.DistributedTask.Logging
return trimmed;
}
public static String PowerShellPreAmpersandEscape(String value)
{
// if the secret is passed to PS as a command and it causes an error, sections of it can be surrounded by color codes
// or printed individually.
// The secret secretpart1&secretpart2&secretpart3 would be split into 2 sections:
// 'secretpart1&secretpart2&' and 'secretpart3'. This method masks for the first section.
// The secret secretpart1&+secretpart2&secretpart3 would be split into 2 sections:
// 'secretpart1&+' and (no 's') 'ecretpart2&secretpart3'. This method masks for the first section.
var trimmed = string.Empty;
if (!string.IsNullOrEmpty(value) && value.Contains("&"))
{
var secretSection = string.Empty;
if (value.Contains("&+"))
{
secretSection = value.Substring(0, value.IndexOf("&+") + "&+".Length);
}
else
{
secretSection = value.Substring(0, value.LastIndexOf("&") + "&".Length);
}
// Don't mask short secrets
if (secretSection.Length >= 6)
{
trimmed = secretSection;
}
}
return trimmed;
}
public static String PowerShellPostAmpersandEscape(String value)
{
var trimmed = string.Empty;
if (!string.IsNullOrEmpty(value) && value.Contains("&"))
{
var secretSection = string.Empty;
if (value.Contains("&+"))
{
// +1 to skip the letter that got colored
secretSection = value.Substring(value.IndexOf("&+") + "&+".Length + 1);
}
else
{
secretSection = value.Substring(value.LastIndexOf("&") + "&".Length);
}
if (secretSection.Length >= 6)
{
trimmed = secretSection;
}
}
return trimmed;
}
private static string Base64StringEscapeShift(String value, int shift)
{
var bytes = Encoding.UTF8.GetBytes(value);

View File

@@ -638,7 +638,6 @@ namespace GitHub.DistributedTask.Pipelines.ObjectTemplating
new NamedValueInfo<NoOperationNamedValue>(PipelineTemplateConstants.Matrix),
new NamedValueInfo<NoOperationNamedValue>(PipelineTemplateConstants.Steps),
new NamedValueInfo<NoOperationNamedValue>(PipelineTemplateConstants.GitHub),
new NamedValueInfo<NoOperationNamedValue>(PipelineTemplateConstants.Inputs),
new NamedValueInfo<NoOperationNamedValue>(PipelineTemplateConstants.Job),
new NamedValueInfo<NoOperationNamedValue>(PipelineTemplateConstants.Runner),
new NamedValueInfo<NoOperationNamedValue>(PipelineTemplateConstants.Env),

View File

@@ -18,12 +18,5 @@ namespace GitHub.DistributedTask.WebApi
get;
set;
}
[DataMember]
public string Path
{
get;
set;
}
}
}

View File

@@ -1,36 +0,0 @@
using System.Runtime.Serialization;
namespace GitHub.DistributedTask.WebApi
{
/// <summary>
/// Information about a step run on the runner
/// </summary>
[DataContract]
public class ActionsStepTelemetry
{
[DataMember(EmitDefaultValue = false)]
public string Ref { get; set; }
[DataMember(EmitDefaultValue = false)]
public string Type { get; set; }
[DataMember(EmitDefaultValue = false)]
public bool? HasRunsStep { get; set; }
[DataMember(EmitDefaultValue = false)]
public bool? HasUsesStep { get; set; }
[DataMember(EmitDefaultValue = false)]
public bool IsEmbedded { get; set; }
[DataMember(EmitDefaultValue = false)]
public bool? HasPreStep { get; set; }
[DataMember(EmitDefaultValue = false)]
public bool? HasPostStep { get; set; }
[DataMember(EmitDefaultValue = false)]
public int? StepCount { get; set; }
}
}

View File

@@ -9,9 +9,6 @@ namespace GitHub.DistributedTask.WebApi
Error = 1,
[EnumMember]
Warning = 2,
[EnumMember]
Notice = 3
Warning = 2
}
}

View File

@@ -142,32 +142,6 @@ namespace GitHub.DistributedTask.WebApi
this.ActionsEnvironment = actionsEnvironment;
}
public JobCompletedEvent(
Int64 requestId,
Guid jobId,
TaskResult result,
Dictionary<String, VariableValue> outputs,
ActionsEnvironmentReference actionsEnvironment,
List<ActionsStepTelemetry> actionsStepsTelemetry)
: this(requestId, jobId, result, outputs)
{
this.ActionsEnvironment = actionsEnvironment;
this.ActionsStepsTelemetry = actionsStepsTelemetry;
}
public JobCompletedEvent(
Int64 requestId,
Guid jobId,
TaskResult result,
Dictionary<String, VariableValue> outputs,
ActionsEnvironmentReference actionsEnvironment,
List<ActionsStepTelemetry> actionsStepsTelemetry,
List<JobTelemetry> jobTelemetry)
: this(requestId, jobId, result, outputs, actionsEnvironment, actionsStepsTelemetry)
{
this.JobTelemetry = jobTelemetry;
}
[DataMember(EmitDefaultValue = false)]
public Int64 RequestId
{
@@ -195,20 +169,6 @@ namespace GitHub.DistributedTask.WebApi
get;
set;
}
[DataMember(EmitDefaultValue = false)]
public List<ActionsStepTelemetry> ActionsStepsTelemetry
{
get;
set;
}
[DataMember(EmitDefaultValue = false)]
public List<JobTelemetry> JobTelemetry
{
get;
set;
}
}
[DataContract]

View File

@@ -1,17 +0,0 @@
using System.Runtime.Serialization;
namespace GitHub.DistributedTask.WebApi
{
/// <summary>
/// Information about a job run on the runner
/// </summary>
[DataContract]
public class JobTelemetry
{
[DataMember(EmitDefaultValue = false)]
public string Message { get; set; }
[DataMember(EmitDefaultValue = false)]
public JobTelemetryType Type { get; set; }
}
}

View File

@@ -1,13 +0,0 @@
using System.Runtime.Serialization;
namespace GitHub.DistributedTask.WebApi
{
public enum JobTelemetryType
{
[EnumMember]
General = 0,
[EnumMember]
ActionCommand = 1,
}
}

View File

@@ -24,7 +24,6 @@ namespace GitHub.DistributedTask.WebApi
this.OSDescription = referenceToBeCloned.OSDescription;
this.ProvisioningState = referenceToBeCloned.ProvisioningState;
this.AccessPoint = referenceToBeCloned.AccessPoint;
this.Ephemeral = referenceToBeCloned.Ephemeral;
if (referenceToBeCloned.m_links != null)
{
@@ -82,16 +81,6 @@ namespace GitHub.DistributedTask.WebApi
set;
}
/// <summary>
/// Signifies that this Agent can only run one job and will be removed by the server after that one job finish.
/// </summary>
[DataMember]
public bool? Ephemeral
{
get;
set;
}
/// <summary>
/// Whether or not the agent is online.
/// </summary>

View File

@@ -38,7 +38,6 @@ namespace GitHub.DistributedTask.WebApi
this.RefName = recordToBeCloned.RefName;
this.ErrorCount = recordToBeCloned.ErrorCount;
this.WarningCount = recordToBeCloned.WarningCount;
this.NoticeCount = recordToBeCloned.NoticeCount;
this.AgentPlatform = recordToBeCloned.AgentPlatform;
if (recordToBeCloned.Log != null)
@@ -223,13 +222,6 @@ namespace GitHub.DistributedTask.WebApi
set;
}
[DataMember(Order = 55)]
public Int32? NoticeCount
{
get;
set;
}
public List<Issue> Issues
{
get

View File

@@ -5,6 +5,7 @@
<OutputType>Library</OutputType>
<RuntimeIdentifiers>win-x64;win-x86;linux-x64;linux-arm64;linux-arm;osx-x64</RuntimeIdentifiers>
<TargetLatestRuntimePatch>true</TargetLatestRuntimePatch>
<AssetTargetFallback>portable-net45+win8</AssetTargetFallback>
<NoWarn>NU1701;NU1603</NoWarn>
<Version>$(Version)</Version>
<DefineConstants>TRACE</DefineConstants>

Some files were not shown because too many files have changed in this diff.