Compare commits

...

407 Commits

Author SHA1 Message Date
Thomas Boop
709bd6113d Update releaseVersion 2022-06-22 12:07:32 -04:00
Thomas Boop
d2c6a4e4bc 294.0 release notes (#1963)
* 293.1 release notes

* let's make it a minor version bump
2022-06-22 11:57:10 -04:00
Stefan Ruvceski
d11bd3d8be Created env var for forcing node12 actions to run on node16 (#1913)
* Created env var for forcing node12 actions to run on node16

* get value of hostContext environment variable

* changing location of forced node version check

* small code refactoring

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>

* more small code refactoring

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>

* refactoring of conditions for getting internal node version

* changing expected value for node version env var

* Adding empty line between two methods

* Created method GetNodeVersion

* GetNodeVersion from function to inline call and PR fixes

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>
Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>
2022-06-22 11:13:28 -04:00
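
A minimal sketch of the check this change describes, assuming a hypothetical environment-variable name and helper; the runner's actual constant and method names may differ:

```
using System;

static class NodeVersionSketch
{
    // Hypothetical knob name for illustration; the runner's actual variable may differ.
    const string ForceNodeVersionEnvVar = "ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION";

    // Returns the node version an action should run with: node12 actions are bumped
    // to node16 only when the environment variable opts in.
    public static string GetNodeVersion(string declaredVersion)
    {
        var forced = Environment.GetEnvironmentVariable(ForceNodeVersionEnvVar);
        if (string.Equals(declaredVersion, "node12", StringComparison.OrdinalIgnoreCase) &&
            string.Equals(forced, "node16", StringComparison.OrdinalIgnoreCase))
        {
            return "node16";
        }
        return declaredVersion;
    }
}
```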
Tingluo Huang
761785620f Support pass runner JitConfig as arg. (#1925) 2022-06-22 10:59:29 -04:00
Ferenc Hammerl
416771d4b1 Fix PrependPath format to be array instead of a concatenated string (#1948)
* Fix prependPath format
2022-06-22 09:11:17 -04:00
Tatyana Kostromskaya
9499f477a2 Add retry logic around getting job messages from broker (#1939)
* Just a simple solution without additional funcs

* Delete old comment

* resolve

* Refactor retry function, make it more common

* Make retry function generic, get rid of extra params

* delete extra using

* Add cancellation token and limit of attempts

* Add some additional logging

* Rework condition

* replace with do..while

* return `while (true)` to simplify code structure

* Add other cancelling token, add TODO comment
2022-06-21 16:12:07 +02:00
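
A generic retry wrapper along the lines the bullets above describe (a `while (true)` loop, an attempt limit, and a cancellation token) might look like this sketch; the delegate shape and names are illustrative, not the runner's API:

```
using System;
using System.Threading;
using System.Threading.Tasks;

static class RetrySketch
{
    // Minimal generic retry: bounded attempts, cancellation support, fixed delay.
    public static async Task<T> RetryAsync<T>(
        Func<CancellationToken, Task<T>> operation,
        int maxAttempts,
        TimeSpan delay,
        CancellationToken cancellationToken)
    {
        var attempt = 0;
        while (true)
        {
            attempt++;
            try
            {
                return await operation(cancellationToken);
            }
            catch (Exception) when (attempt < maxAttempts && !cancellationToken.IsCancellationRequested)
            {
                // Retry on failure until the attempt limit; the last failure propagates.
                await Task.Delay(delay, cancellationToken);
            }
        }
    }
}
```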
Ferenc Hammerl
6bc6d475f9 No longer trace 'ex' twice, only further up in the callstack (#1949) 2022-06-16 16:36:54 -04:00
Tuukka Lahti
ca2b1bc6d5 Update dependencies list to support Ubuntu 22.04 (#1946) 2022-06-15 12:37:02 -04:00
Ferenc Hammerl
591f8c3510 Runner container hooks Beta (#1853)
* Added ability to run Dockerfile.SUFFIX ContainerAction

* Extracted IsDockerFile method

* reformatted, moved from index to Last()

* extracted IsDockerfile to DockerUtil with L0

* added check for IsDockerfile to account for docker://

* updated test to clearly show path/dockerfile:tag

* fail if Data.Image is not Dockerfile or docker://[image]

* Setup noops for JobPrepare and JobCleanup hooks

* Add container jobstarted and jobcomplete hooks

* Run 'index.js' instead of specific command hooks

* Call jobprepare with command arg

* Use right command name (hardcoded)

Co-authored-by: Nikola Jokic <nikola-jokic@users.noreply.github.com>

* Invoke hooks with arguments

* Add PrepareJob hook to work with jobcontainers

Co-authored-by: Nikola Jokic <nikola-jokic@users.noreply.github.com>

* Rename methods

* Use new hookcontainer to run prep and clean hooks

* Get path from ENV

* Use enums

* Use IOUtils.cs

* Move container files to folder

* Move namespaces

* Store "state" between hooks

* Remove stdin stream in containerstephosts

* Update Constants.cs

* Throw if stdin fails

* Cleanup obvious nullrefs and unused vars

* Cleanup containerhook directory

* Call step exec hook

* Fix windows build

* Remove hook from hookContainer

* Rename file

* More renamings

* Add TODOs

* Fix env name

* Fix missing imports

* Fix imports

* Run script step on jobcontainer

* Enable feature if env is set

* Update ContainerHookManager.cs

* Update ContainerHookManager.cs

* Hooks allowed to work even when context isn't returned

* Custom hooks enabled flag and additional null checks

* New line at the end of the FeatureFlagManager.cs

* Code refactoring

* Supported just in time container building or pulling

* Try mock-build for osx

* Build all platforms

* Run mock on self-hosted

* Remove GITHUB prefix

* Use ContainerHooksPath instead of CustomHooksPath

* Null checks simplified

* Code refactoring

* Changing condition for image building/pulling

* Code refactoring

* TODO comment removed

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>

* Call container step if FF is on

* Rename run script function

* Use JToken instead of dynamic

* Add TODO

* Small refactoring + renames + TODOs

* Throw on DetermineNodeRuntimeVersion

* Fix formatting

* Add run-container-step

* Supported nodeJS in Alpine containers

* Renamed Alpine to IsAlpine in HookResponse

* Method for checking platform for alpine container

* Added container hooks feature flag check

* Update IsHookFeatureEnabled with new params

* Rename featureflag method

* Finish rename

* Set collection null values to empty arrays when JSON serialising them

* Disable FF until we merge

* Update src/Runner.Worker/Container/ContainerHooks/HookContainer.cs

* Fix method name

* Change hookargs to superclass from interface

* Using only Path.Combine in GenerateResponsePath

* fix merge error

* EntryPointArgs changed to list of args instead of one args string

* Changed List to IEnumerable for EntryPointArgs and MountVolumes

* Get ContainerRuntimePath for JobContainers from hooks

* Read ContainerEnv from response file

* Port mappings saved after creating services

* Support case when responseFile doesn't exist

* Check if response file exists

* Logging in ExecuteHookScript

* Save hook state after all 4 hooks

* Code refactoring

* Remove TODO

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>

* Remove second TODO

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>

* Removing container env changes

* Removing containerEnv and dockerManager

* Delete mock-build.yml

* Update IOUtil.cs

* Add comment about containerhooks

* Fix merge mistake

* Remove solved todo

* Determine which shell to use for hooks scenario

* Overload for method ExecuteHookScript with prependPath as arg

* Adding HostContext to the GetDefaultShellForScript call

* prependPath as a mandatory parameter

* Improve logging for hooks

* Small changes in logging

* Allow null for ContainerEntryPointArgs

* Changed log messages

* Skip setting EntryPoint and EntryPointArgs if hooks are enabled

* Throw if IsAlpine is null in PrepareJob

* Code refactoring - added GetAndValidateResponse method

* Code refactoring

* Changes in exception message

* Only save hookState if returned

* Use FF from server

* Empty line

* Code refactoring

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>

* Send null instead of string empty

* Remove TODO

* Code refactoring and some small changes

* Allow Globals to be null to pass L0

* Fix setup in StepHostL0

* Throw exception earlier if response file doesn't exist and prepare_job hook is running

* Refactoring GetResponse method

* Changing exception message if response file is not found

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>

* Changing exception message if isAlpine is null for prepare_job hook

* Rename hook folder

* Fail if compatible hookfile not found

* Use .Value instead of casting bool? to bool

* Format spacing

* Formatting

* Use user and system mvs

* Use variables instead of entire context in featuremanager

* Update stepTelemetry if step uses containerhooks

* Restore import

* Remove unnecessary field from HookContainer

* Refactor response context and portmappings

* Force allow hooks if FF is on

* Code refactoring

* Revert deleting usings

* Better hookContainer defaults and use correct portmapping list

* Make GetDefaultShellForScript a  HostContext extension method

* Generic hookresponse

* Code refactoring, unnecessary properties removed - HookContainer moved to the HookInput.cs

* Remove empty line

* Code refactoring and better exception handling

* code refactor, removing unnecessary props

* Move hookstate to global ContainerHookState

* Trace exception before we throw it for not losing information

* Fix for null ref exception in GetResponse

* Adding additional check for null response in prepareJob hook

* Refactoring GetResponse with additional check

* Update error messages

* Ports in ResponseContainer changed from IList to IDictionary

* Fix port format

* Include dockerfile

* Send null Registry obj if there's nothing in it

* Minor formatting

* Check if hookIndexPath exists relocated to the ContainerHookManager

* Code refactoring - ValidateHookExecutable added to the ContainerHookManager

* check if ContainerHooksPath is set when AllowRunnerContainerHooks is on

* Submit JSON telemetry instead of boolean

* Prefix step hooks with "run"

* Rename FeatureManager

* Fix flipped condition

* Unify js shell path getter with ps1 and sh getter

* Validate on run, not on instantiation of manager

* Cleanup ExecuteAsync methods

* Handle exception in executeHookScript

* Better exception types

* Remove comment

* Simplify boolean

* Allow jobs without jobContainer to run

* Use JObject instead of JToken

* Use correct Response type

* Format class to move cleanupJobAsync to the end of public methods

* Rename HookIndexPath to HookScriptPath

* Refactor methods into expression bodies

* Fix args class hierarchy

* Fix argument order

* Formatting

* Fix nullref and don't swallow stacktrace

* Whitelist HookArgs

* Use FF in FeatureManager

* Update src/Runner.Worker/ContainerOperationProvider.cs

Co-authored-by: Tingluo Huang <tingluohuang@github.com>

* Update src/Runner.Worker/ActionRunner.cs

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

* Update src/Runner.Worker/ActionRunner.cs

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

* Only mount well known dirs to job containers

* Get trace from hostcontext

* Use hook execution for setting telemetry

Co-authored-by: Nikola Jokic <nikola.jokic@akvelon.com>
Co-authored-by: Nikola Jokic <nikola-jokic@users.noreply.github.com>
Co-authored-by: Nikola Jokic <97525037+nikola-jokic@users.noreply.github.com>
Co-authored-by: Stefan Ruvceski <stefan.ruvceski@akvelon.com>
Co-authored-by: ruvceskistefan <96768603+ruvceskistefan@users.noreply.github.com>
Co-authored-by: Thomas Boop <thboop@github.com>
Co-authored-by: stefanruvceski <ruvceskistefan@github.com>
Co-authored-by: Tingluo Huang <tingluohuang@github.com>
Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>
2022-06-10 13:51:20 +00:00
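
The long list above implements the container-hooks protocol: the runner resolves a node script from an environment variable, feeds it a JSON payload with a command, arguments, carried-over state, and a response-file path, then reads the response file back (which may legitimately be absent for some hooks). A rough sketch of that flow, with illustrative names only; the real contract lives in `ContainerHookManager.cs` and the hooks ADR:

```
using System;
using System.Diagnostics;
using System.IO;
using System.Text.Json;
using System.Threading.Tasks;

static class ContainerHookSketch
{
    // Rough sketch of invoking one container hook and reading its response file.
    // Property names, error handling, and the env-var lookup are illustrative.
    public static async Task<JsonDocument> ExecuteHookAsync(string command, object args, object state)
    {
        var hookScript = Environment.GetEnvironmentVariable("ACTIONS_RUNNER_CONTAINER_HOOKS")
            ?? throw new InvalidOperationException("Container hooks path is not set.");
        var responseFile = Path.Combine(Path.GetTempPath(), $"{Guid.NewGuid()}.json");

        var input = JsonSerializer.Serialize(new { command, responseFile, args, state });

        var psi = new ProcessStartInfo { FileName = "node", RedirectStandardInput = true };
        psi.ArgumentList.Add(hookScript);

        using var proc = Process.Start(psi);
        await proc.StandardInput.WriteAsync(input);   // hand the hook its JSON input
        proc.StandardInput.Close();
        await proc.WaitForExitAsync();

        if (proc.ExitCode != 0)
        {
            throw new Exception($"Container hook '{command}' failed with exit code {proc.ExitCode}.");
        }

        // Some hooks (e.g. cleanup) may not write a response file at all.
        return File.Exists(responseFile)
            ? JsonDocument.Parse(await File.ReadAllTextAsync(responseFile))
            : null;
    }
}
```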
Ferenc Hammerl
ac7b34a071 Release 2.293.0 (#1940)
* Update releaseNote.md

* 2.293.0 Release

* Update releaseNote.md

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>
2022-06-10 15:42:51 +02:00
Thomas Boop
0d1e6fd57b Add ADR for Container Hooks (#1891)
* Add ADR for Container Hooks

* Rename 0000-container-hooks.md to 1891-container-hooks.md

* Update 1891-container-hooks.md

* Update docs/adrs/1891-container-hooks.md

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>

* Update docs/adrs/1891-container-hooks.md

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>

* Update docs/adrs/1891-container-hooks.md

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>

* Update docs/adrs/1891-container-hooks.md

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>

* Update docs/adrs/1891-container-hooks.md

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>

* Update docs/adrs/1891-container-hooks.md

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>
2022-06-10 09:35:30 -04:00
Ferenc Hammerl
9623a44c2f Allow admins to fail jobs without container (#1895)
* Allow admins to fail jobs without container

* Make method static

* Update src/Runner.Common/Constants.cs

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

* Update src/Runner.Worker/JobExtension.cs

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

* Update src/Runner.Worker/JobExtension.cs

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

* Rename env

* Add test for throwing when no container but required

* Update src/Runner.Worker/JobExtension.cs

* Update src/Test/L0/Worker/JobExtensionL0.cs

* Update src/Runner.Common/Constants.cs

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>
2022-06-09 17:17:11 -04:00
Rob Herley
b2e2aa68c8 remove job summary feature flag (#1936) 2022-06-09 15:49:04 -04:00
eric sciple
a9ce6b92c4 Allow redirect get message call to broker (#1935) 2022-06-09 18:36:55 +00:00
eric sciple
a1bf8401d7 Handle message from broker (#1934) 2022-06-09 14:07:44 -04:00
eric sciple
a7152f1370 server wrapper for pulling full job message (#1933) 2022-06-09 17:50:52 +00:00
eric sciple
af285115e7 http client updates for broker flow (#1931) 2022-06-09 12:46:08 -04:00
Ferenc Hammerl
0431b6fd40 Revert bash and shell -e filePath escape (#1932)
It generated invalid arguments for `Process()` when the `bash` command itself was an argument as well, for example:

```
            _proc.StartInfo.FileName = "/usr/bin/docker";
            _proc.StartInfo.Arguments = "exec -i --workdir /__w/container-hook-e2e/container-hook-e2e 47105c66144d8809d9fa2bce9a58ea0564cd14def0ae7952cd6231fba3576db1 sh -e '/__w/_temp/fd086560-cb92-4f3b-a99c-35a6b7b1bbdb.sh'";
```
2022-06-09 14:37:08 +02:00
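A plausible reading of that failure, as a sketch (not the runner's code): single quotes are not quoting characters for `ProcessStartInfo.Arguments` splitting, so they reach the child process verbatim, and the shell inside the container is then asked to run a script whose path literally contains `'` characters.

```
using System;
using System.Diagnostics;

class QuotingSketch
{
    static void Main()
    {
        // /bin/echo simply prints back the arguments it receives, quotes and all,
        // showing that the single quotes were never stripped during splitting.
        var psi = new ProcessStartInfo
        {
            FileName = "/bin/echo",
            Arguments = "sh -e '/tmp/example.sh'",
            RedirectStandardOutput = true,
        };
        using var proc = Process.Start(psi);
        Console.Write(proc.StandardOutput.ReadToEnd()); // -> sh -e '/tmp/example.sh'
        proc.WaitForExit();
    }
}
```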
Nikola Jokic
c3d5449146 Job hook provider now sets shell name for script handler (#1826)
* Job hook provider now sets shell name for script handler

* fixed script handler and job hook provider to work with the name without fail

* restored import used by osx

* fixed order of imports

* added quotes around resolved script path allowing space in script path

* added quotes around bash and sh _defaultArguments

* Changed double quotes to single quotes in sh -e

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>

* Changed double quotes to single quotes in bash

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>
2022-06-08 13:54:23 +02:00
Tingluo Huang
9c5300b5b2 Handle HostedRunnerShutdownMessage from service to shutdown hosted runner faster. (#1922) 2022-06-02 13:14:50 -04:00
ruvceskistefan
183b1f387c targetArchitecture removed from launch.json after macos arm64 release (#1908) 2022-05-23 21:46:54 -04:00
Thomas Boop
42ad85741e 292 release (#1906) 2022-05-23 09:53:08 -04:00
Ferenc Hammerl
88ee16fb02 Save original, pre-parsed string from workflow input for the user's custom volume mounts (#1889)
* Save pre parsed string from workflow input for volume mounts that have one

* Use property

* Use named params
2022-05-23 12:07:38 +02:00
Thomas Boop
5cca207314 Port the 291.1 hotfix to main (#1905)
* Revert "Added ability to run Dockerfile.SUFFIX ContainerAction (#1738)"

20b7e86e47

* port release notes
2022-05-22 16:24:18 -04:00
Tingluo Huang
0b73794267 Set timeout on sending live console log. (#1903) 2022-05-20 21:31:21 -04:00
Tingluo Huang
d7694774a4 Update release note, workflow, doc for osx-arm64. (#1904) 2022-05-20 12:28:52 -04:00
Tingluo Huang
0398f57125 Create runner layout for osx-arm64 (Apple M1) platform. (#1618)
* Create runner layout for osx-arm64 (Apple M1) platform.

* bypass m1 macos

* l0
2022-05-20 11:00:54 -04:00
Tingluo Huang
fade0f46e7 Bump dotnet SDK to 6.0.300 (#1900)
* Bump dotnet SDK to 6.0.300
2022-05-17 22:51:32 -04:00
Thomas Boop
02b52e8497 ADR: Runner Job Started/Completed Hooks (#1751)
* RunnerHookADR

* Rename 0000-runner-job-hooks.md to 1751-runner-job-hooks.md

* Update docs/adrs/1751-runner-job-hooks.md

Co-authored-by: Edward Thomson <ethomson@github.com>

* Update docs/adrs/1751-runner-job-hooks.md

Co-authored-by: Edward Thomson <ethomson@github.com>

* update step names

Co-authored-by: Edward Thomson <ethomson@github.com>
2022-05-12 15:18:11 -04:00
Ferenc Hammerl
628f462ab7 Use header of redirect instead of parsing content (#1874)
* Use header of redirect instead of parsing content

* Add exception so we don't hit 404s later

* Fix typo

* Update SelfUpdaterL0.cs
2022-05-09 14:04:18 +02:00
Ferenc Hammerl
7ba4f8587e 2.291.0 Release Notes (#1854)
* Update releaseNote.md

* Update runnerversion

* Update releaseNote.md

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>
2022-04-29 12:53:36 +02:00
ruvceskistefan
88f7c56757 Issue 1528: use OS specific path separator (#1617)
* Issue 1528: use OS specific path separator

* Using Path.Combine instead of OS specific c_defaultPathSeparator
2022-04-27 22:16:03 -04:00
Nikola Jokic
20b7e86e47 Added ability to run Dockerfile.SUFFIX ContainerAction (#1738)
* Added ability to run Dockerfile.SUFFIX ContainerAction

* Extracted IsDockerFile method

* reformatted, moved from index to Last()

* extracted IsDockerfile to DockerUtil with L0

* added check for IsDockerfile to account for docker://

* updated test to clearly show path/dockerfile:tag

* fail if Data.Image is not Dockerfile or docker://[image]
2022-04-27 21:23:12 -04:00
Tingluo Huang
bd5f275830 Update runnerversion to match latest release. 2022-04-26 09:54:42 -04:00
Yang Cao
a7aadf5615 Update Actions Summary limit to 1MiB (#1839)
* Update Actions Summary limit to 1MiB

* Making limit a public const so other parts of the codebase are aware of the limit too
2022-04-20 17:08:50 -04:00
Tingluo Huang
1c582abc8b Skip running L0 tests in release workflow to prevent package pollution (#1832) 2022-04-19 16:10:47 -04:00
Tingluo Huang
44d4d076fe Capture telemetry when git errors on unsafe repository. (#1823) 2022-04-13 12:48:52 -04:00
Ferenc Hammerl
b6195624ac 2.290.0 Release notes (#1820)
* 2.290.0 rel notes

* Update releaseNote.md
2022-04-12 10:34:15 -04:00
ruvceskistefan
ead3509d5a Added warning in case of invalid combination of command and flags and/or arguments (#1781)
* Added warning in case of invalid combination of command and flags and/or arguments

* Deleting unnecessary comments

* Added separate list for generic options

* Added PAT to the valid remove options

* Added command name to the error message
2022-04-11 09:23:58 -04:00
Nikola Jokic
fee24199cb Added input context to shell in composite run-step (#1767)
* Added input context to shell in composite run-step

* moved from string-shell-context to string-steps-context
2022-04-11 14:50:45 +02:00
Nikola Jokic
c8cb600ac7 Use StepHost when evaluating inputs to actions (#1762)
* composite action github.action_path set based on the StepHost

* in progress on updating github context for input template

* Fixed updating the context data for evaluation

* refactored logic so it is a little cleaner

* removed resolving the action_path in CompositeActionHandler

* removed added DeepClone

* added feature flag and modified the dict in place

* refactored step host to change context data. Added L0

* repaired spaces

* moved logic from step host to execution context, added recursive translation

* removed empty lines

* moved to extension methods

* Update src/Test/L0/Worker/StepHostL0.cs

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>
2022-04-11 12:43:24 +00:00
Thomas Boop
f48f314a70 fix an issue where container hooks used the job default working directory (#1809) 2022-04-06 14:33:32 +02:00
Soe Tun
7b677e0618 Mark run as Cancelled/Failed upon HostContext.RunnerShutdownToken state (#1792)
- github/c2c-actions-support#883
2022-04-04 13:46:03 -04:00
Nikola Jokic
d70f9f6174 Continue on error for the composite actions (#1763)
* Added continue on error to composite action

* changed from boolean-strategy-context -> boolean-steps-context for action_yaml

* refactored composite handler to always set outcome

* retrigger checks

* fixed typo in ??= operator

* boolean-steps-context accepts the same context as string-steps-context

* setting the outcome only on continue-on-error

* moved continue on error logic to the execution context

* Added L0 table tests for continue-on-error ExecutionContext

* Added missing mocks on StepsRunnerL0 for this update

* removed empty line and added one line separating the call

* Removed empty line

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>
2022-04-01 09:18:53 -04:00
dependabot[bot]
0343e76789 Bump minimist from 1.2.5 to 1.2.6 in /src/Misc/expressionFunc/hashFiles (#1783)
Bumps [minimist](https://github.com/substack/minimist) from 1.2.5 to 1.2.6.
- [Release notes](https://github.com/substack/minimist/releases)
- [Commits](https://github.com/substack/minimist/compare/1.2.5...1.2.6)

---
updated-dependencies:
- dependency-name: minimist
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-29 22:40:32 -04:00
Yashwanth Anantharaju
909b05eb66 FeedStream: handle websocket close failures (#1789)
* handle close failures

* handle in other place as well

* refactor

* bump runner version

* update release notes
2022-03-28 14:41:21 -04:00
Yashwanth Anantharaju
2e3976cf97 Feedstream websocket: set user agent (#1791)
* set user agent

* let's also add prefix
2022-03-28 14:31:23 -04:00
ruvceskistefan
052ac521b0 Issue 1739: Fixing null reference exception during configuring runner with invalid repo URL or token (#1741)
* Fixing null reference exception when configuring runner with invalid repo URL or token

* Throw exception instead of ConvertFromJson

* Storing the response code
2022-03-28 09:06:24 -04:00
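
The null reference came from deserializing whatever the server returned, even for an invalid URL or token; the bullets above describe checking and storing the response code and throwing instead. A sketch of that pattern with illustrative names:

```
using System;
using System.Net.Http;
using System.Text.Json;
using System.Threading.Tasks;

static class ConfigResponseSketch
{
    // Check (and keep) the HTTP status code before deserializing the body, rather
    // than calling the JSON converter on an error page and dereferencing null.
    public static async Task<T> ReadConfigResponseAsync<T>(HttpResponseMessage response)
    {
        var body = await response.Content.ReadAsStringAsync();
        if (!response.IsSuccessStatusCode)
        {
            throw new InvalidOperationException(
                $"Runner configuration request failed with status {(int)response.StatusCode}: {body}");
        }
        return JsonSerializer.Deserialize<T>(body);
    }
}
```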
Ferenc Hammerl
408d6c579c Add annotations if Node 12 action is found and FF is on (#1735)
* Add annotations if node 12 action is found

* Better placeholder

* Only warn if FF is on

* Move annotation logic

* Pass in the LTS Url

* Raise annotation right before executing the action

* Match server side FF name

* Change name back to features

* Better warning text

* Update src/Runner.Common/Constants.cs

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>
2022-03-22 10:43:25 +01:00
Thomas Boop
46258428cd 2.289.1 release notes (#1771) 2022-03-18 14:22:24 -04:00
Thomas Boop
eb9a604b63 Revert "Added repository name and workflow file name to console output (#1761)" (#1770)
98aa9c1152
2022-03-18 14:09:01 -04:00
Thomas Boop
8792d8e5ee cleanup message displayed on job started/completed hooks (#1769) 2022-03-18 14:08:50 -04:00
Thomas Boop
87e86e3d72 2.289.0 release notes (#1766) 2022-03-18 16:12:50 +01:00
Thomas Boop
48b6cd9a42 Update dependencies to latest versions (#1756) 2022-03-17 23:21:35 -04:00
Yashwanth Anantharaju
d081289ed5 postlines: refactor per feedback (#1755)
* refactor per feedback

* feedback

* nit

* commentify

* feedback

* feedback
2022-03-17 21:35:20 -04:00
Ferenc Hammerl
7d5e9cd70f Runner Job Started/Completed Hooks (#1737)
* Prototype for pre job hook

* Remove debug log

* Enable hooks again

* Initialize with hostContext

* Add event_path, fix no-path bug

* Allow script post steps

* Call script handler with correct pre post stage

* Add job completed hook

* Make filecommand work and hardcode shell

* Conditionally print step details and no telemetry for hooks

* Figure out which script to use

* Only check path for managed scripts

* Restore win dependency

* Nits

* Remove unused, add named params

* Telemetry + refactoring

* add message to job

* rename hooks remove stale comment

* cleanup

* Use .CreateService to create step

* Add L0s

* pr feedback

* update tests

* add disclaimer, clean up code

* spacing fix

* little more cleanup

* pr fix

* pr feedback

* Refactor to use JobExtension

* fix tests

* fix typo

* cleanup code

* more cleanup

* little more cleanup

* last bit of cleanup

* fix tests

* nit fix

* Update src/Runner.Worker/JobHookProvider.cs

Co-authored-by: Edward Thomson <ethomson@github.com>

* don't override runner telemetry

* pr feedback

* pr feedback

* pr feedback

Co-authored-by: Thomas Boop <thboop@github.com>
Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>
Co-authored-by: Edward Thomson <ethomson@github.com>
2022-03-17 21:35:04 -04:00
ruvceskistefan
98aa9c1152 Added repository name and workflow file name to console output (#1761)
* Adding repo name and workflow file to console output

* Add guard for empty workflow file name
2022-03-17 13:25:28 -04:00
Yashwanth Anantharaju
ddc700e9eb Send postlines via websocket if we can (#1730)
* feed via websocket

* feed via websocket

* feedback

* ensure right schema is used

* fix resiliency

* some fixes

* fix sending message

* chunk data

* let's abort, which will also dispose

* close gracefully
2022-03-15 14:01:18 -04:00
Konrad Pabjan
a0458aebfe Save record order for annotation links when creating issues (#1744)
* Save record order for annotation links when creating issues

* PR feedback

* Add tests for step and line numbers
2022-03-14 11:20:11 -04:00
Tingluo Huang
b2c6d093b2 Validate packages hash before uploading to github release in CD workflow. (#1745) 2022-03-14 09:21:13 -04:00
Thomas Boop
292a2e0ab3 Fix spelling (#1747) 2022-03-11 09:41:54 -05:00
Nikola Jokic
29cee52276 Prefer user who initiated install before (#1714) 2022-03-10 14:08:19 +01:00
Antoine Grondin
ad0d0c4d0a worker: expose github.triggering_actor as an env-var (#1726)
* worker: expose `github.triggering_actor` as an env-var

* worker: sort the allow list
2022-03-02 16:49:26 -05:00
Thomas Boop
2c6064a655 Update to v2.288.1 (#1723)
We hotfixed the releases/m288 branch to update to v2.288.1. This PR brings us to parity on the main branch
2022-03-01 18:19:56 +00:00
ruvceskistefan
af6c8e6edd Issue 1698: Use safe_sleep executable in bash scripts (#1707)
* use safe_sleep executable in bash scripts

* new line at the end of safe_sleep bash script

* Replacing relative paths with absolute paths and changing location of safe_sleep

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>
2022-03-01 13:08:52 +00:00
Nikola Jokic
c15d3f10b2 Enhancement: RunnerService.js added logic to fail on N attempts if env variable exported (#1693)
* RunnerService.js added logic to fail on N attempts

* removed code gracefulShutdown, removed unused import
2022-03-01 13:55:25 +01:00
TingluoHuang
bdf1e90503 Prepare 2.288.0 runner releases. 2022-02-25 15:08:26 -05:00
Ferenc Hammerl
100c99f263 Force JS Actions Node version to 16 if FF is on unless user opted out (#1716)
* Set GH actions Node version to 16 if FF is on unless user opted out

* Add L0s (WIP)

* Wrap tests into theory

* Only check for node12 actions

* Refactor node version picking
2022-02-25 14:59:16 -05:00
Ferenc Hammerl
e8ccafea63 Add internal to node version function and use better env var name (#1715) 2022-02-25 14:59:02 -05:00
Thomas Boop
02d2cb8fcd Lets allow up to 150 characters for services on linux/mac (#1710)
* Lets allow up to 150 characters on linux/mac, just to avoid some issues with runner naming

* Add 4 randomized digits on mac/linux

* fix pragma issue

* fix test

* Address pr feedback

* reduce complexity

* lets make it cleaner!

* fix test

* fix logic
2022-02-24 21:05:51 -05:00
Rob Herley
0cbf3351f4 Update summary max file size annotation error text (#1712)
* add doc link to summary file size err annotation

* Update src/Runner.Common/Constants.cs

Co-authored-by: Konrad Pabjan <konradpabjan@github.com>

* remove i18n from url

Co-authored-by: Konrad Pabjan <konradpabjan@github.com>
2022-02-24 21:01:49 -05:00
Ferenc Hammerl
6abef8199f Use better exit codes and comparison (#1708) 2022-02-24 21:10:52 +01:00
ruvceskistefan
ec9830836b Issue 1596: Runner throws null ref exception when new line after EOF is missing (#1687)
* Issue 1596: runner throws nullref exception when writing env var

* Adding tests for missing new line after EOF marker

* Changing newline to new line
2022-02-23 09:55:59 -05:00
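
The multiline `$GITHUB_ENV` syntax (`NAME<<DELIMITER … DELIMITER`) is what this fix concerns; a tolerant parse that does not assume a trailing newline after the closing delimiter might look like this sketch (illustrative only, not the runner's implementation):

```
using System;
using System.Collections.Generic;
using System.IO;

static class EnvFileSketch
{
    // Parses "NAME=value" lines and "NAME<<DELIM ... DELIM" blocks from an env file,
    // deliberately tolerant of a missing newline after the final delimiter.
    public static Dictionary<string, string> Parse(string path)
    {
        var result = new Dictionary<string, string>();
        var lines = File.ReadAllLines(path);   // no trailing-newline assumptions
        for (var i = 0; i < lines.Length; i++)
        {
            var line = lines[i];
            if (string.IsNullOrWhiteSpace(line)) continue;

            var heredoc = line.IndexOf("<<", StringComparison.Ordinal);
            if (heredoc > 0)
            {
                var name = line.Substring(0, heredoc);
                var delimiter = line.Substring(heredoc + 2);
                var value = new List<string>();
                // Collect until the delimiter line; stop at end-of-file instead of
                // indexing past it (the null ref the issue describes).
                while (++i < lines.Length && lines[i] != delimiter)
                {
                    value.Add(lines[i]);
                }
                result[name] = string.Join("\n", value);
            }
            else
            {
                var eq = line.IndexOf('=');
                if (eq > 0) result[line.Substring(0, eq)] = line.Substring(eq + 1);
            }
        }
        return result;
    }
}
```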
Nikola Jokic
460c32a337 Repaired hashFiles call so if error was thrown, it was returned to process invoker (#1678)
* hashFiles.ts added exit status on promise action

* generated layoutbin/hashfiles/index.js
2022-02-23 09:51:09 -05:00
ruvceskistefan
934027da60 Issue 1261: inconsistency of outputs (both canceled and cancelled are used) (#1624)
* Issue 1261: inconsistency of outputs

* Changing cancelled to canceled in one error message
2022-02-23 09:48:24 -05:00
Tingluo Huang
28f0027938 Add SHA to useragent. (#1694) 2022-02-17 20:01:48 +00:00
Thomas Boop
17153c9b29 Revert "revert node12 version due to fs.copyFileSync hang https://git… (#1651)
* Revert "revert node12 version due to fs.copyFileSync hang https://github.com/actions/runner/issues/1536 (#1537)"

bef164a12f

* check hashes before tests because tests rely on right values + update hashes

* fix tests

* use hc trace
2022-02-17 09:54:13 -05:00
Tingluo Huang
a65ac083b4 Skip DeleteAgentSession when the access token has been revoked. (#1692) 2022-02-16 16:10:18 -05:00
Tingluo Huang
882f36dcf8 Sending telemetry about actions usage. (#1688)
* Sending telemetry about actions usage.

* .

* L0 tests.

* .
2022-02-16 12:18:21 -05:00
ruvceskistefan
f2578529b0 Issue 1662: retry policy for methods GetTenantCredential and GetJITRunnerTokenAsync (#1691)
* Issue 1662: Adding retry policy for methods GetTenantCredential and GetJITRunnerTokenAsync

* Adding HttpClient creation to the retry

* Random backoff time
2022-02-16 10:56:45 -05:00
Ferenc Hammerl
bd77ccf34e Prefer node16 over node12 when running internal scripts (#1621)
* Use 16 to run RunnerService.js

* Execute hashfiles using node16

* Run downloadCert.js using node16

* Run makeWebRequest.js using node16

* Run macos-run-invoker.js using node16

* Run hashFiles.js using node16

* Update tests to use node16

* Update documentation to recommend node16

* Duplicate macos service js fix for 16

* Add PR link

* Revert ADR node change

* Merge node12/16 retainment IFs

* Try both node12 and node16

* Close if

* Revert "Update tests to use node16"

This reverts commit bbca7b9f1c.

* Fix condition

* Unfurl if condition

* Allow user to force a node version

* Format update template

* Comment env var

* Rename vars

* Fix naming

* Fix rename

* Set node ver override if job message has it

* Format executionContext

* Can only receive 'forceNode12' or nothing from FF
No specific node version from server

Co-authored-by: Ferenc Hammerl <hammerl.ferenc@gmail.com>
2022-02-14 15:06:08 +01:00
Tingluo Huang
cb19da9638 Move JobTelemetry and StepsTelemetry into GlobalContext. (#1680)
* Move JobTelemetry and StepsTelemetry into GlobalContext.

* .

* .
2022-02-11 16:18:41 -05:00
Ferenc Hammerl
d64190927f Allow mocked updates for E2E testing (#1654)
* Allow mock update messages

* Kill node process see other PR

* Better comments

* Better comparison for archiveFile

* Revert merge comment mistakes

Co-authored-by: Ferenc Hammerl <hammerl.ferenc@gmail.com>
2022-02-11 21:27:26 +01:00
Sam Cook
101b74cab6 Add ability to specify runner group when creating service (#1675) 2022-02-11 21:25:55 +01:00
Nikola Jokic
c06da82ccd updated systemd svc.sh to accept custom service file (#1612)
* updated systemd svc.sh to accept custom service file

* updated systemd and darwin svc templates to accept TEMPLATE_PATH env
2022-02-11 09:29:48 -05:00
Balaga Gayatri
374989b280 Pass jobId to the actionsDownloadInfo controller (#1639)
* Update JobServer.cs

* Update ActionManager.cs

* Update TaskHttpClientBase.cs

* Update ActionManagerL0.cs

* Update ActionManager.cs

* :nit changes

* Update ActionManager.cs

* :nit changes

* Code formatting

* Update JobServer.cs

* Update JobServer.cs

* Update TaskHttpClientBase.cs

* Update ActionManagerL0.cs

* :nit changes

* passing `jobId` as queryparameter to the controller

* :nit changes

* Update src/Sdk/DTGenerated/Generated/TaskHttpClientBase.cs

Co-authored-by: Lokesh Gopu <lokesh755@github.com>

Co-authored-by: Tingluo Huang <tingluohuang@github.com>
Co-authored-by: Lokesh Gopu <lokesh755@github.com>
2022-02-09 14:42:13 -05:00
Tingluo Huang
47fee8cd64 Fix typo in hashFiles.ts. (#1672)
* Fix typo in hashFiles.ts.

* l0

* .
2022-02-09 13:13:51 -05:00
ruvceskistefan
85dcd93d98 Problem with debugging on macOS M1 (#1625)
* Solving issue with debugging on macOS M1

* Fixing problem with debugging on macOS M1

* Adding targetArchitecture in launch.json configs

* Code refactor
2022-02-08 14:17:46 -05:00
Rob Herley
bac91075f4 Use job execution context instead of step for adding summary attachments (#1667)
* use job exec context to queue/attach summaries

* step summary tests: use job ctx and verify against server queue
2022-02-08 10:22:36 -08:00
Ferenc Hammerl
9240a1cf6c Fix windows console runner update crash (#1670)
* Kill node process to recover handle

So we can print to the console in Runner.Listener once again

* Revert testing changes

Co-authored-by: Ferenc Hammerl <hammerl.ferenc@gmail.com>
2022-02-08 15:27:04 +01:00
Tim Burgan
2946801fb6 Added examples and aligned language within docs/checks/actions.md (#1664)
* examples

* Update actions.md
2022-02-07 10:45:23 -05:00
Sven Pfleiderer
1a0d588d3a Add support for Step Summary (#1642)
* First prototype of step summary environment variable

* Fix file contention issue

* Try to simplify cleaning up file references

* use step id as md file name, queue file attachment

* separate logic into attachment summary func

* Fix indentation

* Add (experimental) feature flag support

* reorganize summary upload determination logic

* file i/o exception handling + pr feedback

* Revert changes for now to reintroduce them later

* Add skeleton SetStepSummaryCommand

* Update step summary feature flag name

* Port ShouldUploadAttachment from previous iteration

* Port QueueStepSummaryUpload from previous iteration

* Improve exception handling when uploading attachment

* Add some minor logging improvements

* Refuse to upload files larger than 128k

* Implement secrets scrubbing

* Add TODO comment to remove debugging temp files

* Add first tests

* Add test for secret masking

* Add some naming/style fixes suggested in feedback

* inline check for feature flag

* Inline method for style consistency

* Make sure that scrubbed file doesn't exist before creating it

* Rename SetStepSummaryCommand to CreateStepSummaryCommand

* Fix error handling messages

* Fix file command name when registering extension

* Remove unnecessary file deletion

Co-authored-by: Rob Herley <robherley@github.com>
2022-02-04 13:46:30 -08:00
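
A compressed sketch of the upload path those bullets describe (size check, secret scrubbing, then queueing the file as an attachment); the limit value, parameter names, and helper delegates are illustrative stand-ins for the runner's own components:

```
using System;
using System.IO;

static class StepSummarySketch
{
    // Illustrative cap; the PR discussion above mentions refusing files over 128k.
    const int AttachmentSizeLimitBytes = 128 * 1024;

    public static void ProcessStepSummary(
        string summaryFile,                       // path the step wrote via its env var
        Func<string, string> scrubSecrets,        // masks registered secrets
        Action<string> queueAttachmentUpload)     // hands the file to the upload queue
    {
        var info = new FileInfo(summaryFile);
        if (!info.Exists || info.Length == 0)
        {
            return; // nothing to upload for this step
        }
        if (info.Length > AttachmentSizeLimitBytes)
        {
            Console.WriteLine($"Step summary is larger than {AttachmentSizeLimitBytes} bytes; skipping upload.");
            return;
        }

        // Write a scrubbed copy so registered secrets never leave the runner.
        var scrubbedFile = summaryFile + ".scrubbed";
        File.WriteAllText(scrubbedFile, scrubSecrets(File.ReadAllText(summaryFile)));
        queueAttachmentUpload(scrubbedFile);
    }
}
```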
jeremyd2019
192ebfeccf fix run.cmd script (#1633)
Restore ability to run run.cmd from directories other than the runner root, and fix it exiting the cmd that's running it.  Fixes #1632
2022-02-02 12:09:13 +01:00
Ferenc Hammerl
f2347b7a59 Use absolute path when invoking run-helper.sh or Runner.Listener (#1645) 2022-02-02 12:08:35 +01:00
Ferenc Hammerl
8f160bc084 Reopen 'Make run.sh|cmd handle update without quitting so containers using them as entrypoints don't exit on update ' (#1646)
* Only execute post for actions that have one

* Working container runner update with run.sh

* Revert "Only execute post for actions that have one"

This reverts commit 9675941fdb.

* Relaunch the listener without quitting run.cmd

* Fix typo

* Extract most os run.sh logic so we can update it

* Add bash line endings

* Extract the logic from run.cmd

* Add EoF lines

* Add unexpected ERRORLEVEL messages to cmd

* Simplify contract between run and helper

* Remove unused exit

* WIP: run a copy of the helper so it's safe to update

* Throw NonRetryableException if not configured

* Log and format

* Fix typo

* Fix typo

* Use helper template system for bash as well

* Update run.sh

* Remove unnecessary comments

* Use ping instead of timeout

* Use localhost in ping-timeout (n times, w timeout)

Co-authored-by: Ferenc Hammerl <hammerl.ferenc@gmail.com>
2022-02-02 11:16:01 +01:00
Thomas Boop
47ba1203c9 Revert "Make run.sh|cmd handle update without quitting so container… (#1635)
* Revert "Make `run.sh|cmd` handle update without quitting so containers using them as entrypoints don't exit on update (#1494)"

d8251bf912

* update runnerversion as well
2022-02-01 15:19:04 +01:00
Thomas Boop
dc8b1b685f Runner 2.287.0 Release Notes (#1631)
* Update runner to 2.287.0

* Update release notes
2022-01-27 11:28:40 -05:00
Tingluo Huang
8eacbdc79f Runner config option to disable auto-update. (#1558)
* Runner config option to disable auto-update.

* Update src/Runner.Listener/Configuration/ConfigurationManager.cs

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

* Update src/Runner.Listener/Configuration/ConfigurationManager.cs

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

* Update src/Runner.Listener/Configuration/ConfigurationManager.cs

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

* Update src/Runner.Listener/Configuration/ConfigurationManager.cs

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

* feedback.

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>
2022-01-26 13:23:24 -05:00
Pavel Iakovenko
6b4a95cdb1 Use default 8Mb chunking for the FileContainer uploads (#1626) 2022-01-24 13:57:05 -05:00
Josh Soref
c95d5eae30 Update 0276-problem-matchers.md (#1105)
* Update 0276-problem-matchers.md

Update to reflect current behavior

* Update docs/adrs/0276-problem-matchers.md

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>
2022-01-21 11:35:50 -05:00
Rob Cowsill
ea67ff9647 Update Required Dev Dependencies (#1379)
* Add cURL to Linux requirements

* Add VS2017 to Windows requirements
2022-01-21 11:35:29 -05:00
Josh Soref
d7d38e173e Update 0354-runner-machine-info.md (#1108) 2022-01-21 11:35:14 -05:00
Tingluo Huang
ac31fd10b2 Introduce GITHUB_ACTIONS_RUNNER_TLS_NO_VERIFY=1 to skip SSL cert verification for the runner. (#1616) 2022-01-19 10:31:17 -05:00
Ferenc Hammerl
d8251bf912 Make run.sh|cmd handle update without quitting so containers using them as entrypoints don't exit on update (#1494)
* Only execute post for actions that have one

* Working container runner update with run.sh

* Revert "Only execute post for actions that have one"

This reverts commit 9675941fdb.

* Relaunch the listener without quitting run.cmd

* Fix typo

* Extract most os run.sh logic so we can update it

* Add bash line endings

* Extract the logic from run.cmd

* Add EoF lines

* Add unexpected ERRORLEVEL messages to cmd

* Simplify contract between run and helper

* Remove unused exit

* WIP: run a copy of the helper so it's safe to update

* Throw NonRetryableException if not configured

* Log and format

* Fix typo

* Fix typo

* Use helper template system for bash as well

* Update run.sh

* Remove unnecessary comments

* Use ping instead of timeout

* Use localhost in ping-timeout (n times, w timeout)

Co-authored-by: Ferenc Hammerl <hammerl.ferenc@gmail.com>
2022-01-19 14:38:43 +01:00
Tingluo Huang
715bb7cca8 Fix breaking change in dotnet 6 around globalization-invariant. (#1609) 2022-01-14 17:09:06 +00:00
Thomas Boop
47dfebdf48 Set outcome/conclusion for composite action steps in steps context (#1600)
* set outcome/conclusion for composite action steps in steps context

* fix typo

* pr fixes

* fix happy case!
2022-01-12 11:14:46 -05:00
Tingluo Huang
7cb198a554 Generate refname for all build/pull container step. (#1601) 2022-01-11 10:15:44 -05:00
Tingluo Huang
7616e9b7aa Use trimmed packages to speedup runner updates (#1568)
* consume trimmed packages.

* .

* .

* .

* .
2022-01-10 21:24:55 -05:00
Tingluo Huang
3b8475de3e Skip adding line to console line queue if the queue is backed up. (#1592)
* Skip adding line to console line queue if the queue is backed up.

* .
2022-01-07 14:28:21 -05:00
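
A minimal way to express "skip the line if the queue is backed up" is a bounded queue whose writes fail fast when full; this is a sketch, not the runner's actual console queue:

```
using System.Threading.Channels;

static class ConsoleQueueSketch
{
    // A bounded queue for console lines. The capacity is illustrative.
    public static Channel<string> CreateConsoleLineQueue(int capacity = 1024) =>
        Channel.CreateBounded<string>(capacity);

    // Writer side: when the queue is backed up, TryWrite returns false and the
    // line is simply skipped rather than blocking the worker.
    public static bool TryEnqueue(Channel<string> queue, string line) =>
        queue.Writer.TryWrite(line);
}
```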
Tomasz
ba9766d544 change group description (#1595) 2022-01-07 14:27:48 -05:00
Tyler887
29da60a538 Delete Runner E2E tests badge (#1582) 2022-01-07 14:27:27 -05:00
Tingluo Huang
f2e210e5f3 Add trace to help debug IPC message corruption in runner. (#1587)
* Add trace to help debug IPC message corruption in runner.

* .
2022-01-05 13:42:20 -05:00
Tingluo Huang
fa32fcf2a1 Use checkout@v2 in workflows (#1588)
* Use checkout@v2 in workflows

* .
2022-01-05 11:40:26 -05:00
Tingluo Huang
46da23edb1 Allow script to exit early as soon as runner process exits. (#1580) 2022-01-04 19:02:45 -05:00
Tingluo Huang
9bfbc48f45 Prepare runner release 2.286.0. (#1574) 2021-12-21 10:50:14 -05:00
Tingluo Huang
ead1826afb Update codeql.yml 2021-12-21 10:32:55 -05:00
khaser
9de17f197c Deleted extra background in github-praph.png, which is displayed in README.md (#1432)
* github-praph.png deleted extra background

* background around tentacles of mascot also deleted
2021-12-21 10:29:18 -05:00
Hans Kratz
45decac397 Fix test failure: /bin/sleep on Macos 11 (Monterey) does not accept the suffix s. (#1472) 2021-12-21 10:27:48 -05:00
Edward Thomson
55ed60b9fc Direct people to Feedback or Support forums (#1571)
Many people open bug reports or feature requests in the `actions/runner`
repository that are more generally about GitHub Actions.  Often changes
in GitHub Actions are cross-cutting across multiple teams or feature
areas, so it's best if we direct people to the more general areas
(Actions Community Support or GitHub Feedback) so that we can get the
most eyes on the problem and give the quickest response.
2021-12-20 15:21:32 -05:00
George Karagoulis
698d3a2e66 Show service container logs on teardown (#1563)
* Update ContainerOperationProvider.cs

* Only print logs for service container jobs.
2021-12-20 10:55:47 -05:00
Tingluo Huang
d0ab54ce45 Refactor SelfUpdater adding L0 tests. (#1564)
* Refactor SelfUpdater with L0 tests.

* .

* .
2021-12-20 00:37:14 -05:00
Tingluo Huang
3e65909b81 Produce trimmed down runner packages. (#1556)
* Produce trimmed down runner packages.

* feedback.

* rename.
2021-12-15 22:05:58 -05:00
Tingluo Huang
3ec20e989d Update dependency check for dotnet 6. (#1551) 2021-12-15 12:16:11 -05:00
eric sciple
231fdcb19d bump patch version 2021-12-08 12:51:38 -06:00
eric sciple
bef164a12f revert node12 version due to fs.copyFileSync hang https://github.com/actions/runner/issues/1536 (#1537) 2021-12-06 10:27:44 -06:00
Meng Ye
a519f96a41 fix Log size and retention settings not working (#1507)
env
- RUNNER_LOGRETENTION
- WORKER_LOGRETENTION
- RUNNER_LOGSIZE
- WORKER_LOGSIZE
2021-12-02 10:04:39 -05:00
Tingluo Huang
b1ecffd707 Add masks for multiline secrets from ::add-mask:: (#1521)
* Add mask for multiline secrets.

* .
2021-12-01 09:53:13 -05:00
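
The gist of masking a multiline `::add-mask::` value is to register each individual line as well as the whole secret, since log output is matched line by line; a sketch with an illustrative masker delegate, not the runner's actual secret masker:

```
using System;

static class MultilineSecretSketch
{
    // Registers a multiline secret so each line is masked on its own; a full
    // multiline string would otherwise never match a single log line. The
    // addMask delegate stands in for the runner's secret masker.
    public static void AddMultilineMask(string secret, Action<string> addMask)
    {
        addMask(secret);
        foreach (var line in secret.Split('\n'))
        {
            var trimmed = line.TrimEnd('\r');
            // Skip empty or very short fragments to avoid masking everything.
            if (trimmed.Length > 3)
            {
                addMask(trimmed);
            }
        }
    }
}
```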
Tingluo Huang
801a02ec89 Bump runtime to dotnet 6 (#1471)
* bump runtime to dotnet 6
2021-11-30 22:00:15 -05:00
Ferenc Hammerl
6332f9a42f Prepare for runner 2.285.0 release (#1520) 2021-11-29 16:07:56 +00:00
Tingluo Huang
5b8ff174c6 Add telemetry around runner update process. (#1497)
* Add telemetry around runner update process.

* .

* .

* .
2021-11-22 18:27:57 -05:00
Tingluo Huang
e3e977fd84 Support node.js 16 and bump node.js 12 version. (#1439)
* Support node.js 16 and bump node.js 12 version.

* L0
2021-11-18 15:25:33 -05:00
Ferenc Hammerl
4dc8a09db3 Only execute post for actions that have one (#1481)
* Only execute post for actions that have one

* Revert haspost check

* Remove launch commit

* Remove comment

* Restore whitespace

* Restore wspace
2021-11-18 17:56:13 +01:00
Laura Yu
dcc5d34ad1 Add secret source to start job step (#1411)
* Add secret source to start job step

WIP

* Update to use GetGitHubContext to grab source info

* Update JobExtensionL0.cs

* Update JobExtension.cs

* Update JobExtension.cs
2021-11-17 17:09:38 -05:00
eric sciple
3e34fb10c1 improve telemetry to better diagnose runner configuration issues (#1487) 2021-11-15 13:42:57 -06:00
Tingluo Huang
23a693aa2c Update README.md 2021-11-09 14:19:46 -05:00
Tingluo Huang
eb36db8ff9 Try to delete portable-net45+win8 from all projects (#1470)
* Update Runner.Common.csproj

* Update Runner.Listener.csproj

* Update Runner.PluginHost.csproj

* Update Runner.Plugins.csproj

* Update Runner.Sdk.csproj

* Update Runner.Worker.csproj

* Update Sdk.csproj

* Update Test.csproj
2021-11-09 10:53:53 -05:00
Julio Barba
85e1927754 Prepare for runner 2.284.0 release (#1448) 2021-11-01 11:16:21 -04:00
Julio Barba
b6dbf42746 Improve retry handling based on feedback (#1447) 2021-10-29 16:04:34 -04:00
Ferenc Hammerl
67ba8a7d42 Support Conditional Steps in Composite Actions (#1438)
* conditional support for composite actions

* Fix Conditional function evaluation

* Push launch.json temporarily

* Revert "Push launch.json temporarily"

* rename context

* Cleanup comments

* fix success/failure functions to run based on pre/main steps

* idea of step_status

* change to use steps context, WIP

* add inputs to possible if condition expressions

* use action_status

* pr cleanup

* Added right stages

* Test on stage in conditional functions

* Fix naming and formatting

* Fix tests

* Add success and failure L0s

* Remove comment

* Remove whitespace

* Undo formatting

* Add L0 for step-if parsing

* Add ADR

Co-authored-by: Thomas Boop <thboop@github.com>
2021-10-29 15:45:42 +02:00
Ferenc Hammerl
e4f9e6ae26 Log current runner version in terminal (#1441) 2021-10-29 14:23:26 +02:00
Thomas Boop
854d5e3bf3 Fix an issue where nested local composite actions did not correctly register post steps (#1433)
* Always register post steps for local actions

* Register post steps along with their conditions

* remove debug code

Co-authored-by: Ferenc Hammerl <fhammerl@github.com>
2021-10-27 15:31:58 +02:00
Thomas Boop
57dec28f68 Cleanup Older versions on MacOS now that we recreate node versions as needed (#1410)
* Cleanup old version update code

* fix template

* fix indents
2021-10-19 10:15:37 -04:00
Tingluo Huang
55a861f089 Expose GITHUB_REF_* as environment variable (#1314)
* Keep env vars alphabetical

* ref_* context.

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>
2021-10-18 22:22:34 -04:00
jeremyd2019
51b2031cbf Add arch to runner context (#1372)
Fixes #1185
2021-10-13 23:49:26 -04:00
Raphael Cruzeiro
400b2d879c Makes the user keychains available to the service (#847)
Without creating a session, the service is not able to access the keychains for the user specified under `UserName`. This causes any workflow that deals with code signing to fail as the only keychain loaded will be the system one. This should fix #350
2021-10-06 15:37:45 -04:00
Thomas Boop
c4b6d288d4 fix ephemeral runner upgrade on mac/linux (#1403) 2021-10-05 10:15:19 +02:00
Julio Barba
0699597876 Use Actions Service health and api.github.com endpoints after connection failure on Actions Server and Hosted (#1385) 2021-09-30 13:40:34 -04:00
Thomas Boop
a592b14ae3 Runner 2.283.2 Release (#1389) 2021-09-29 15:49:40 -04:00
Thomas Boop
04269f7b1b Handle keeping previous OSX versions more smoothly on Mac (#1381)
* Handle macOS upgrade smoothly

* cleanup

* misc cleanup

* final updates

* Update src/Misc/layoutbin/update.sh.template

Co-authored-by: Patrick Ellis <319655+pje@users.noreply.github.com>

* Update src/Misc/layoutbin/update.sh.template

Co-authored-by: Patrick Ellis <319655+pje@users.noreply.github.com>

* Upload telemetry and default to old method as needed

* minor fix

* add one more bit of logging

* some more telemetry

* quote variables to handle spaces

* tiny fix for ubuntu

* remove version and move telemetry to diag

* use full path

Co-authored-by: Patrick Ellis <319655+pje@users.noreply.github.com>
2021-09-29 15:49:31 -04:00
Ferenc Hammerl
e89d2e84bd Stop-Commands: stopToken restrictions (#1371)
* Prevent stopTokens that are workflow commands

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

* Check context for env var too

* Accept true, 1 and $true instead of just "true"

* Setup ExpressionValues in tests

* Update src/Runner.Common/Constants.cs

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

* Separate success and fail tests for invalid token

* Fix envcontext for tests

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>
2021-09-29 14:44:01 -04:00
Thomas Boop
afe7066e39 only cleanup runner local files on success (#1384) 2021-09-28 18:55:28 -04:00
Ferenc Hammerl
da79ef4acb Fix unconfiguring of runner after group changes (#1359)
* Ignore agentpool when unconfiguring the runner

Runner names and IDs are unique within a ServiceHost.
They don't need to be included when unconfiguring the runner.

* Use -1 instead of 0 to highlight how it is ignored

* Use overloads and 0 instead of -1

Using 0 seems to be the convention

* Fix typo calling the wrong method
2021-09-22 15:04:43 +02:00
Tingluo Huang
5afb52b272 Update the comment about the --once in Constants.cs (#1360)
* Update Constants.cs

* feedback.

* Update src/Runner.Listener/Runner.cs

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>
2021-09-21 21:31:48 +00:00
Thomas Boop
cf87c55557 Don't retry 422 (#1352) 2021-09-21 09:59:21 -04:00
Ferenc Hammerl
43fa351980 Update telemetry (#1355)
* Track "pause-logging"

* Bump release version
2021-09-20 15:54:20 +02:00
Ferenc Hammerl
ecfc2cc9e9 Prepare 2.283.0 release (#1351)
* Update releaseNote.md

* Update runnerversion

* Update releaseNote.md

Co-authored-by: Patrick Ellis <319655+pje@users.noreply.github.com>

Co-authored-by: Patrick Ellis <319655+pje@users.noreply.github.com>
2021-09-20 14:59:37 +02:00
Ferenc Hammerl
740fb43731 Generic telemetry (#1321)
* Add generateIdTokenUrl as an env var

* Add generateIdTokenUrl to env vars

* Add basic telemetry class and submit it on jobcompleted

* Use constructor overload

* Rename telemetry to jobTelemetry

* Rename telemetry file

* Make JobTelemetryType a string

* Collect telemetry

* Remove debugger

* Update src/Runner.Worker/ActionCommandManager.cs

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

* Use same JobTelemetry for all contexts

* Mask telemetry data

* Mask in JobRunner instead

* Empty line

* Change method signature
Returning with a List suggests we clone it and that the
original doesn't change.

* Update launch.json

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>
2021-09-20 14:44:50 +02:00
Patrick Ellis
f259e5706f Ephemeral runner deletes local .runner,.credentials files after completion (#1344)
Closes #1337
2021-09-16 11:00:27 -04:00
Ferenc Hammerl
5d84918ed5 Add name to runner context (#1312)
* Add generateIdTokenUrl as an env var

* Add generateIdTokenUrl to env vars

* Add name and runner_group to context

* No longer add runner-group

* Update runner name if needed

* Get interface instead of concrete class

* Check for nulls on ReservedAgent

* Avoid loading settings file unnecessarily

* Only check agentName once

* Use Trace.Error when can't update settings

* Better equals and exception handling

* Update JobDispatcher.cs

* Add tests and null check
2021-09-16 15:25:51 +02:00
Julio Barba
881c521005 Revert "Recreate VssConnection on retry (#1316)" (#1343)
This reverts commit 4359dd605b.
2021-09-15 13:21:50 -04:00
Patrick Ellis
176e7f5208 Trim trailing whitespace in all md and yml files (#1329)
* Trim non-significant trailing whitespace, add final newlines to md,yml files

* Add .editorconfig with basic whitespace conventions
2021-09-15 13:35:25 +02:00
Jacob Wallraff
b6d46c148a Add attempt number to GitHub context (#1302)
* Add attempt number to GitHub context

* Change context name

* Changing order
2021-09-15 11:00:53 +02:00
Thomas Boop
38e33bb8e3 Update network.md 2021-09-14 15:28:30 -04:00
Patrick Ellis
404b3418b7 Prepare 2.282.0 release (#1327) 2021-09-13 13:56:47 -04:00
Tingluo Huang
7ffd9af644 Support --ephemeral flag (#660)
This optional flag will configure the runner to only take one job, and let the service un-configure the runner after that job finishes.
2021-09-13 11:28:09 -04:00
Thomas Boop
1b69c279f5 Networking TSG (#1325)
* Update Network Troubleshooting doc

* fix list

* Update network.md
2021-09-13 09:53:20 +02:00
Liviu Ionescu
567870dbb8 Avoid ConsoleColor.White, it is unreadable on light themes (#1295) (#1319)
* Avoid white, it is unreadable on light themes (#1295)

* remove ', ConsoleColor.White' from banner

* remove ', ConsoleColor.White' from prompt

* cleanups
2021-09-13 07:50:52 +00:00
Tingluo Huang
72fa2a8a0d Wait for job record updated before running steps. (#1320)
* Wait for job record updated before running steps.

* only oidc
2021-09-09 21:55:15 -04:00
Julio Barba
4359dd605b Recreate VssConnection on retry (#1316) 2021-09-09 19:09:17 -04:00
dependabot[bot]
aab936d081 Bump path-parse in /src/Misc/expressionFunc/hashFiles (#1256)
Bumps [path-parse](https://github.com/jbgutierrez/path-parse) from 1.0.6 to 1.0.7.
- [Release notes](https://github.com/jbgutierrez/path-parse/releases)
- [Commits](https://github.com/jbgutierrez/path-parse/commits/v1.0.7)

---
updated-dependencies:
- dependency-name: path-parse
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-09-01 16:48:41 -04:00
Nancy Hsieh
777ce5a0dc ignore empty arrays in jq select (#1269)
* ignore empty arrays in jq select

* Update delete.sh

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>
2021-09-01 20:39:45 +00:00
Tingluo Huang
1a62162708 Prepare 2.281.1 runner release. (#1305) 2021-09-01 16:15:08 -04:00
Thomas Boop
9a829995e0 Show More Step Information in composite Actions (#1279) 2021-09-01 16:04:27 -04:00
Vladimir Panteleev
c5ce52641c Allow setting default severity to "notice" (#1213) 2021-09-01 16:01:23 -04:00
Nick Fields
e82725b580 Update error to say 'uninstall' not 'unconfigure' (#1179)
* Update error to say 'uninstall' not 'unconfigure'

* Say uninstall service in *nix config error msgs

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>
2021-09-01 16:00:19 -04:00
Daniel Asztalos
0464f77de3 Typo fixed (#1289) 2021-09-01 15:59:18 -04:00
Tingluo Huang
1fc159e0df Temporary fix for macOS runner upgrade crash loop. (#1304) 2021-09-01 15:39:17 -04:00
Ferenc Hammerl
3615fb6923 Runner 2.281.0 (#1298)
* Add generateIdTokenUrl as an env var

* Add generateIdTokenUrl to env vars

* Update runnerversion

* Remove old relese notes

* Update releaseNote.md
2021-08-30 18:57:24 +02:00
Ferenc Hammerl
f61dcad5bb Don't try to login to ghcr.io with GHES tokens (#1291)
* Don't try GHXX tokens for ghcr.io login

* Explain hosted / onpremise in comment

* Nitfix variable name
2021-08-30 11:52:12 +02:00
Tingluo Huang
62d568674c Add ACTIONS_ID_TOKEN_REQUEST_URL/Token to script as well. (#1287) 2021-08-26 13:29:02 -04:00
Ferenc Hammerl
07c00f6a8a PowerShell secret masking (#1258)
* Trim pwsh special chars when masking secrets

* Add pwsh valueEncoder

* Explain regex

* Update ValueEncoders.cs

* Add tests for pwsh color codes in secrets

* Formatting

* Group tests into theories

* Split secret on PS chars and mask for them

* Clean up comments

* Remove unused unittest

* Rename escape methods
2021-08-25 23:07:19 +02:00
Tingluo Huang
05b84297b7 Add extra env for the Token log-in action is going to use to request ID_TOKEN. (#1270) 2021-08-23 14:50:35 -04:00
Thomas Boop
04679b56a9 Runner 2.280.3 Release (#1276) 2021-08-19 08:40:11 -04:00
Thomas Boop
d2ca24fa43 For Main Steps, just run the step, don't check condition (#1273)
* For Main Steps, just run the step, don't check condition

* fix whitespace

* pr feedback
2021-08-18 16:40:25 -04:00
Thomas Boop
abdaacfa6e Runner release 2.280.2 (#1259)
* Runner release 2.280.2

* update

* update
2021-08-12 12:55:45 -04:00
Thomas Boop
53fd7161e2 send path when resolving actions (#1250) 2021-08-11 09:48:32 -04:00
Ferenc Hammerl
ce68f3b167 Allow the use of flags in scripts/create-latest-svc.sh in a backwards compatible way (#1220)
* Use flags in svc creation script

* Refactor regex and add comments

* Fix indentation and typo in user matching

* Consistency use flags in automation scripts

* Update documentation to reflect new usage

* Make example more readable

* Remove test echos from script

* Remove test echo

* Format scripts and remove test script

* Remove tar

* Use getopts and single letter flags

* Update docs to show flag usage

* Update usage of create svc

* Revert svc to not use flags

* Revert delete script

* Update docs

* Readd deleted comments
2021-08-09 10:22:19 +02:00
Thomas Boop
e2c7329292 Release notes for 2.280.1 runner (#1244) 2021-08-04 13:28:32 -04:00
Thomas Boop
22a9d89772 Correctly set post step step context (#1243) 2021-08-04 11:39:22 -04:00
Thomas Boop
3851acd0cf fix continue on error (#1238) 2021-08-03 17:44:58 -04:00
Tingluo Huang
aab4aca8f7 Finish job when worker crashed with IOException. (#1239) 2021-08-03 16:21:39 -04:00
Thomas Boop
5af7b87074 Release notes for runner release 2.290.0 (#1237) 2021-08-03 11:12:43 -04:00
Tingluo Huang
110eb3a5de Add generateIdTokenUrl to env vars for actions. (#1234) 2021-08-02 14:47:50 -07:00
Tingluo Huang
bd1341e580 Print out resolved SHA for each action. (#1233) 2021-08-02 15:59:09 -04:00
Thomas Boop
85ce33b1d3 Composite Code Cleanup (#1232)
* composite polish

* Cleanup Condition Handling

* Refactor ConditionTraceWriter

* pr feedback

* cleanup
2021-08-02 14:57:25 -04:00
Thomas Boop
92ec3d0f29 Add better step telemetry and tracing for composite Actions (#1229)
* Add Step Telemetry

* better telemetry and tracing

* cleanup
2021-07-30 10:45:49 -04:00
Thomas Boop
4e95d0d6ad Support pre/post/container/composite actions within composite actions (#1222)
Support Composite Actions with uses: steps
2021-07-28 15:35:21 -04:00
Thomas Boop
5281434f3f Composite Actions Support ADR (#1144)
Composite Actions ADR
2021-07-27 09:29:50 -04:00
Ferenc Hammerl
e9a8bf29df Prefer higher libicu versions in installDependencies.sh (#1228)
* Update libicu dependencies

* Remove redundant comments (methodname is enough)
2021-07-27 14:30:53 +02:00
dependabot[bot]
a65331e887 Bump lodash in /src/Misc/expressionFunc/hashFiles (#1082)
Bumps [lodash](https://github.com/lodash/lodash) from 4.17.19 to 4.17.21.
- [Release notes](https://github.com/lodash/lodash/releases)
- [Commits](https://github.com/lodash/lodash/compare/4.17.19...4.17.21)

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-07-26 23:34:52 -04:00
dependabot[bot]
908a082527 Bump hosted-git-info in /src/Misc/expressionFunc/hashFiles (#1087)
Bumps [hosted-git-info](https://github.com/npm/hosted-git-info) from 2.8.8 to 2.8.9.
- [Release notes](https://github.com/npm/hosted-git-info/releases)
- [Changelog](https://github.com/npm/hosted-git-info/blob/v2.8.9/CHANGELOG.md)
- [Commits](https://github.com/npm/hosted-git-info/compare/v2.8.8...v2.8.9)

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-07-26 23:33:21 -04:00
dependabot[bot]
10ba74f59b Bump glob-parent in /src/Misc/expressionFunc/hashFiles (#1147)
Bumps [glob-parent](https://github.com/gulpjs/glob-parent) from 5.1.1 to 5.1.2.
- [Release notes](https://github.com/gulpjs/glob-parent/releases)
- [Changelog](https://github.com/gulpjs/glob-parent/blob/main/CHANGELOG.md)
- [Commits](https://github.com/gulpjs/glob-parent/compare/v5.1.1...v5.1.2)

---
updated-dependencies:
- dependency-name: glob-parent
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-07-26 22:55:26 -04:00
Ferenc Hammerl
33ee76df29 Prepare 2.279.0 Release (#1217)
* Bump runner version to 2.279.0

* Update release notes
2021-07-21 17:23:02 +02:00
Ferenc Hammerl
592ce1b230 Better error message when a command is not found (#1210)
* Describe missing util / command error in more detail

* Use more uniform language in traceInfo
2021-07-20 15:26:30 +02:00
Luke Tomlinson
fff31e11c5 Add notice annotation level and support more annotation fields (#1175)
* Add Notice Command

* Add Feature Flag For Enhanced Annotations

* Fix tests

* Add validation for columns and lines

* Fix order to match service

* Remove console.write

* Make Validation Better

* Cleanup

* Handle empty/whitespace strings

* Add more validation for line/column ranges

* Make Validation Debug, Not Throw

* Change casing to 🐫 from 🐍

* Give notice a well known tag

* Cleanup

* Sanitize invalid commands rather than fail
2021-07-13 11:38:16 -04:00
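
The validation work in this PR follows a "sanitize, don't fail" rule: bad line/column values in an annotation command are logged at debug level and dropped instead of failing the step. A rough sketch of that pattern, with illustrative names and limits rather than the runner's exact rules:

```csharp
using System;

static class AnnotationProperties
{
    // Parse a line/column property; return null (and log) on anything invalid
    // rather than throwing, so a malformed command can't break the job.
    public static int? ParsePosition(string value, Action<string> debug)
    {
        if (string.IsNullOrWhiteSpace(value))
        {
            return null; // empty/whitespace is simply ignored
        }
        if (!int.TryParse(value, out var position) || position < 1)
        {
            debug($"Invalid annotation position '{value}'; dropping it instead of failing the command.");
            return null;
        }
        return position;
    }
}
```
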
rethab
6443fe8c97 fix typos in docs of JobDispatcher (#1191)
* fix some typos in docs of JobDispatcher

* one more typo

* Update src/Runner.Listener/JobDispatcher.cs

Co-authored-by: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com>
2021-07-13 09:50:51 +02:00
Tingluo Huang
29c09c5bf8 ignore this test 2021-07-08 17:22:46 -04:00
Tingluo Huang
09821e2169 Check runner group when there is only default runner group. (#1172)
* Check runner group when there is only `default` runner group.

* L0
2021-07-02 12:31:54 -04:00
Thomas Boop
7c90b2a929 Composite Actions (#1170)
Composite Actions Refactoring
2021-07-01 13:34:28 -04:00
Tingluo Huang
ee34f4842e Delete runner-basic-e2e-test-case.yml 2021-06-21 13:46:21 -04:00
Tingluo Huang
713344016d Delete e2etest.yml 2021-06-21 13:21:24 -04:00
Darshan Sen
0a6c34669c send SIGKILL after 30s in gracefulShutdown() (#1156)
This resolves the TODO.

Fixes: https://github.com/actions/runner/issues/680
Signed-off-by: Darshan Sen <raisinten@gmail.com>
2021-06-21 12:07:39 -04:00
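
The change itself lands in the JavaScript service wrapper (RunnerService.js), but the pattern is the usual escalation: ask the child process to exit, then force-kill it once a 30-second grace period expires. A loose C# sketch of the same idea; the polite-stop call is a stand-in, since signal handling differs per platform:

```csharp
using System;
using System.Diagnostics;
using System.Threading.Tasks;

static class GracefulShutdown
{
    // Request a clean exit, then hard-kill the whole process tree if the child
    // is still alive after the grace period (30 seconds in the fix above).
    public static async Task StopAsync(Process child, TimeSpan grace)
    {
        if (child.HasExited) return;

        child.CloseMainWindow(); // stand-in for sending SIGINT/SIGTERM
        var exited = child.WaitForExitAsync();
        if (await Task.WhenAny(exited, Task.Delay(grace)) != exited)
        {
            child.Kill(entireProcessTree: true); // SIGKILL-style hard stop
        }
    }
}
```
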
Tingluo Huang
40d6eb3da3 turn-off e2e test 2021-06-19 02:12:21 -04:00
RyotaK
34a985f3b9 Fix typo in sslcert.md (#1162) 2021-06-18 23:38:52 -04:00
jeff
42fe704132 Add message size in chars and bytes (#1100)
* Add message size in chars and bytes

* Log hash of message body
2021-06-10 15:58:56 -04:00
Ferenc Hammerl
a1bcd5996b Check if service exists before stopping it (Linux) (#1135)
* Check if service exists before stopping it

* Remove empty line (formatting)

* Use the same way as status to check service

* Revert formatting change
2021-06-08 13:27:19 +02:00
eric sciple
31584f4451 add comments (#1137) 2021-06-07 11:58:00 -04:00
Ferenc Hammerl
d4cdb633db Run config remove as user instead of root (#1127)
* Run config remove as user instead of root

* Explain why the runner can't be a service on a container

* Fix typo
2021-06-04 11:30:03 -04:00
Yann Soubeyrand
11939832df Return 1 on service status when service is not installed (#1018) 2021-06-04 10:52:13 -04:00
Rob Bos
ebadce7958 Typo: Changed dockerManger to dockerManager (#1133) 2021-06-04 10:51:30 -04:00
Tingluo Huang
4d5d5b74ee Ignore changes from BuildConstants.cs (#1132) 2021-06-03 16:19:26 -04:00
雪猫
ff12fae2c9 Fix automate script URL (#1089) 2021-06-02 08:47:51 -04:00
Ferenc Hammerl
8e907b19dc Vscode launch scripts (#1117)
* Stop ignoring .vscode (launch scripts)

* Check in launch scripts for config and run

This can cause an issue with existing launch configuration on the machines of contributors.

* Improve error msg when runner is not configured

* Unignore .vscode/launch and tasks only

* Remove stopAtEntry and add eof newline

* Remove Runner.Listener from error message

* Rename tasks and run configs

* Ignore BuildConstants.cs

* Use better error msg

* Explain development steps in depth

* Add launch config to directly debug worker

* Update docs with VS Code tips

* Remove auto-generated comments

* Fix link to quickstart in vscode.md

* Remove ':' from link to quickstart

* Revert "Ignore BuildConstants.cs"

This reverts commit 0f13922a87.

* Replace `.sh` with  `.(sh/cmd)` in docs
2021-06-02 08:34:16 -04:00
Ferenc Hammerl
93ec16e14f Wait for debugger to attach in Runner.Worker process (#1101)
* Wait for debugger to attach in Worker

Only if GITHUB_ACTIONS_RUNNER_ATTACH_DEBUGGER is set

* Only wait if env variable parses to boolean 'true'

* Add 30s timeout to the wait

* Clean up leftover line

* Decrease wait to 20s

* Use ConvertToBoolean instead of TryParse
2021-05-27 16:05:48 +02:00
Ferenc Hammerl
8863b1fb2c Add configure section to contribute.md (#1119)
Minor formatting improvements
2021-05-26 12:05:38 +02:00
Josh Soref
484ea74ed0 Update 0277-run-action-shell-options.md (#1106) 2021-05-26 12:04:38 +02:00
Josh Soref
f21e280b5c Update 0279-hashFiles-expression-function.md (#1107) 2021-05-26 12:04:24 +02:00
Josh Soref
e0643c694c Update 0361-wrapper-action.md (#1109) 2021-05-26 12:02:54 +02:00
Josh Soref
508d188fb6 Update 0397-runner-registration-labels.md (#1110) 2021-05-26 12:01:52 +02:00
Josh Soref
e7d74da160 Update 0549-composite-run-steps.md (#1111) 2021-05-26 12:01:02 +02:00
Josh Soref
d1f7258356 Update 0263-proxy-support.md (#1104)
Grammar fixes
2021-05-25 16:42:50 +02:00
eric sciple
3a5ab37153 Handle job not found when ensuring previous dispatch finished (#1083) 2021-05-11 09:19:23 -05:00
eric sciple
419ed24c1e rename nestedSteps to embeddedSteps (#1071) 2021-05-01 12:51:01 -05:00
eric sciple
7cc689b0d9 minor cleanup in composite (#1045) 2021-04-30 15:48:53 -05:00
Tingluo Huang
5941cceb7c Unset NODE_ICU_DATA before starting node in NodeHandler. (#1060)
* Unset NODE_ICU_DATA before starting node in NodeHandler.

* Update src/Runner.Worker/Handlers/NodeScriptActionHandler.cs

Co-authored-by: Ross Brodbeck <hross@users.noreply.github.com>

Co-authored-by: Ross Brodbeck <hross@users.noreply.github.com>
2021-04-23 11:10:22 -04:00
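
The fix above amounts to removing NODE_ICU_DATA from the environment handed to the node child process, so a value inherited from the parent shell can't point node at mismatched ICU data. A simplified sketch, not the runner's actual NodeScriptActionHandler code:

```csharp
using System.Diagnostics;

// Launch node without the inherited NODE_ICU_DATA variable.
var psi = new ProcessStartInfo("node", "index.js") { UseShellExecute = false };
psi.Environment.Remove("NODE_ICU_DATA"); // no-op if the variable isn't set
using var node = Process.Start(psi);
node?.WaitForExit();
```
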
Tingluo Huang
088caf5337 Update support platform doc for Apple M1 issue (#1057)
* apple M1 issue

* Update docs/start/envosx.md

Co-authored-by: Ross Brodbeck <hross@users.noreply.github.com>

* Update docs/start/envosx.md

Co-authored-by: Ross Brodbeck <hross@users.noreply.github.com>

Co-authored-by: Ross Brodbeck <hross@users.noreply.github.com>
2021-04-23 09:47:30 -04:00
Ross Brodbeck
08852bd2fc these are not prerelease anymore 2021-04-23 05:26:31 -04:00
Tingluo Huang
57d694197f Not create runner release as pre-release. (#1054) 2021-04-21 17:08:44 -04:00
eric sciple
fc4027b3f1 Bump version (#1048) 2021-04-16 15:45:55 +00:00
eric sciple
d14881b970 do not truncate error message from template evaluation (#1038) 2021-04-06 16:45:40 -04:00
David Wolf
be9632302c Make FileShare ReadWrite (#1033)
* Make FileShare ReadWrite

* Update FileAccess to ReadWrite

* Update dotnet-install.ps1

* Update dotnet-install.ps1

* Update dotnet-install.ps1

* Update dotnet-install.sh
2021-04-01 16:54:23 -04:00
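
FileShare controls what other handles may do while a file is open; switching to FileShare.ReadWrite lets another process read and write the same file without hitting a sharing violation. A generic illustration of the setting this PR moves to, not the exact call site:

```csharp
using System.IO;

// Open (or create) a file while still allowing concurrent readers and writers.
using var stream = new FileStream(
    path: "runner.log",
    mode: FileMode.OpenOrCreate,
    access: FileAccess.ReadWrite,
    share: FileShare.ReadWrite);
```
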
Ross Brodbeck
2b5ddd7c21 Add a default BuildConstants and change encoding on save (#1026) 2021-03-26 12:26:24 -04:00
eric sciple
8109c962f0 mask secrets with double-quotes when passed to docker command line (#1002) 2021-03-05 15:17:55 -06:00
Tim Etchells
af198237ca Delete script files before replacing during update (#984)
* Delete script files before replacing during update

Signed-off-by: Tim Etchells <tetchel@gmail.com>

* Use IOUtil.DeleteFile()

Co-authored-by: Tingluo Huang <tingluohuang@github.com>
2021-02-22 23:38:15 -05:00
Alberto Gimeno
1559ff15ec Use GITHUB_TOKEN for ghcr.io containers if credentials are not provided (#990)
* Use GITHUB_TOKEN for ghcr.io containers if credentials are not provided

* Use GITHUB_TOKEN also for containers in containers.pkg.github.com
2021-02-18 21:55:58 -05:00
Thomas Boop
67ff8d3460 Release 2.277.1 runner (#977)
* Revert "Enable tty output from Docker Actions (#916)"

5972bd0060

* Release notes

* add pr
2021-02-09 14:45:33 -05:00
Thomas Boop
6cbfbc3186 Add 2.277.0 release notes (#975)
* add 2.276.2 release notes

* major version these changes
2021-02-09 11:18:55 -05:00
Thomas Boop
195c2db5ef Check Runner Zip Hash on Upgrade (#967)
* Check Hash if it exists on runner update
2021-02-09 10:52:46 -05:00
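
The upgrade-time check pairs with the release workflow that publishes SHAs for each package (#948, further down this log): hash the downloaded archive and compare it to the published value before applying the self-update. A simplified sketch, with an assumed metadata source and illustrative names:

```csharp
using System;
using System.IO;
using System.Security.Cryptography;

static class PackageVerifier
{
    // Compare the SHA-256 of the downloaded runner package against the hash
    // shipped with the release; skip the update if they differ.
    public static bool HashMatches(string packagePath, string expectedSha256)
    {
        using var sha256 = SHA256.Create();
        using var stream = File.OpenRead(packagePath);
        var actual = Convert.ToHexString(sha256.ComputeHash(stream));
        return string.Equals(actual, expectedSha256, StringComparison.OrdinalIgnoreCase);
    }
}
```
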
Tingluo Huang
50994bbb3b add --check to the output of run.sh --help. (#970)
* add --check to the output of run.sh --help.

* feedback.
2021-02-09 10:17:54 -05:00
Lucas Costi
7b03699fbe --check strings grammar improvements (#972) 2021-02-08 00:23:14 -05:00
Hollow Man
8a4cb76508 Fix typos (#969)
accidentially -> accidentally
neglible -> negligible
2021-02-05 13:29:43 -05:00
Yang Cao
bc3099793f Display GITHUB_TOKEN permissions (#966)
* Display GITHUB TOKEN permissions

* Display permission list is best effort

* Remove newtonsoft dependency
2021-02-04 23:10:00 -05:00
Santiago Roman
b76d229da0 Fix usage of /dev/null and ping flag in run.sh (#968)
- Use /dev/null instead of nul
- Use -c instead of -n as a ping flag to specify number of packets to be
  sent
2021-02-04 23:09:27 -05:00
TingluoHuang
fe3994bf1d skip dotnet script testing. 2021-02-04 22:58:10 -05:00
TingluoHuang
0ae09e6713 Revert "update dotnet install script."
This reverts commit 2b4d5542aa.
2021-02-04 22:46:15 -05:00
TingluoHuang
2b4d5542aa update dotnet install script. 2021-02-04 22:32:57 -05:00
Denis Baryshev
6b0f0c00b1 use correct exit code and delay on runner update in run.sh (#963)
Fix runner update script
2021-02-04 22:12:55 -05:00
Tingluo Huang
09760c0d69 Trace process error in RunnerService.js (#955) 2021-02-01 10:03:15 -05:00
Tingluo Huang
8f14466cbb Add http POST to --check. (#949)
* Add http POST to --check.

* feedback.
2021-01-30 22:35:45 -05:00
Thomas Boop
fe8a56f81a Generate SHAs for released packages and include them in package notes (#948)
* Update release.yml

Compute SHAs for release builds

* Update release notes with shas

* Update releaseNote.md

* Update release.yml

* Update release.yml

* Update release.yml

* Add ability to get SHAs

* fix typo

* remove debug code
2021-01-28 15:32:41 -05:00
Lokesh Gopu
59b30262ac Update AgentPlatform for job timeline record (#939)
* Update AgentPlatform for job timeline record

* removed unused using
2021-01-25 11:14:28 -05:00
eric sciple
9efcec38cc support authenticated package download (#920) 2021-01-23 14:19:59 -05:00
Joel Dickson
5972bd0060 Enable tty output from Docker Actions (#916)
* Update DockerCommandManager.cs

* Update StepHost.cs

Co-authored-by: Tingluo Huang <tingluohuang@github.com>
2021-01-21 22:35:57 -05:00
Thomas Boop
239cc0d7ca prep 2.276.1 runner release (#929) 2021-01-21 14:02:36 -05:00
Thomas Boop
3fb915450a Runner v2.276.0 fixes (#928)
* Revert "always use Fips Cryptography (#896)"

3b34e203dc

* Revert "Update ldd check with dotnet 5."

4b6ded0a01

* Revert "Update SDK to .NET 5 (#799)"

fc3ca9bb92

* Update dotnet-install scripts
2021-01-21 13:45:16 -05:00
Tingluo Huang
4b6ded0a01 Update ldd check with dotnet 5. 2021-01-15 09:14:55 -05:00
TingluoHuang
0953ffa62b Prepare 2.276.0 runner release. 2021-01-14 13:55:13 -05:00
Robin Neatherway
66727f76c8 Add on: pull_request trigger to CodeQL workflow (#907)
From February 2021, in order to provide feedback on pull requests, Code Scanning workflows must be configured with both `push` and `pull_request` triggers. This is because Code Scanning compares the results from a pull request against the results for the base branch to tell you only what has changed between the two.

Early in the beta period we supported displaying results on pull requests for workflows with only `push` triggers, but have discontinued support as this proved to be less robust.

See https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#scanning-pull-requests for more information on how best to configure your Code Scanning workflows.
2021-01-14 13:26:37 -05:00
Tingluo Huang
7ee333b5cd Adding --check to run a series of network tests against GitHub or GHES. (#900)
* add --check.
2021-01-14 13:26:07 -05:00
Tingluo Huang
3b34e203dc always use Fips Cryptography (#896) 2021-01-12 11:05:01 -05:00
Tingluo Huang
e808190dd2 add warning when running out of disk. (#873) 2021-01-05 21:49:11 -05:00
Yang Cao
d2cb9d7685 Ignore certain scenarios so they are not counted as infra failures (#889)
* Ignore certain scenarios so they are not counted as infra failures

* Check to make sure request is not cancelled
2021-01-05 16:08:02 -05:00
Tiago Silva
5ba6a2c78d Add new ANDROID_SDK_ROOT environment variable (#892) 2021-01-05 13:39:47 -05:00
Adeel Mujahid
fc3ca9bb92 Update SDK to .NET 5 (#799) 2020-12-26 00:01:45 -05:00
Tingluo Huang
a94a19bb36 add e2e workflow badge 2020-12-25 22:39:25 -05:00
Tingluo Huang
a9be5f6557 Update job name in logs. 2020-12-25 22:17:24 -05:00
Tingluo Huang
3600f20cd3 e2e test workflow (#885) 2020-12-25 21:54:46 -05:00
Tingluo Huang
81a00fff3e config runner via PAT. (#874) 2020-12-25 21:54:23 -05:00
Josh Gross
31474098ff Add runtime team as CODEOWNERS (#872) 2020-12-18 14:21:15 -05:00
David Kale
7ff6ff6afa Prepare 2.275.1 2020-12-14 16:36:31 -05:00
Tingluo Huang
56529a1c2f fix compat issue in timeline record state. (#861) 2020-12-14 15:43:00 -05:00
David Kale
510fadf71a Prepare m275 (#860) 2020-12-14 11:02:44 -05:00
klassiker
007ac8138b Add proxy support for container actions (#840)
* Add proxy support for container actions in Runner.Worker/StepsRunner

* Move proxy modifications to ContainerActionHandler
2020-12-11 13:08:45 -05:00
Yang Cao
1e12b8909a Count actions resolve failures as infra failures (#851)
During a job run we may fail to resolve the actions download info; this
stack is fully controlled by GitHub Actions, so it should be counted as an
infrastructure failure instead of a user failure.
2020-12-11 11:07:43 -05:00
Tingluo Huang
9ceb3d481a unset GITHUB_ACTION_REPOSITORY and GITHUB_ACTION_REF for non-repo based actions. (#804) 2020-12-11 11:04:07 -05:00
Bruno FERNANDO
3bce2eb09c feat(scripts): add labels in the script that register runner (#844) 2020-12-11 11:03:04 -05:00
David Kale
80bf68db81 Crypto cleanup and enable usage of FIPS compliant crypto when required (#806)
* Use FIPS compliant crypto when required

* Comment cleanup

* Store OAuth signing scheme in credentialData instead of runner setting

Add encryption scheme for job message encryption key to session

Further cleanup of unused crypto code

* Update windows rsa key manager to use crossplat dotnet RSA api

* Undo unneeded ConfigurationManager change
2020-12-04 11:35:16 -05:00
Thomas Boop
a2e32170fd Disable set-env and add-path commands (#779)
* Disable Old Runner Commands set-env and add-path

* update dotnet install scripts

* update runner version and release notes
2020-11-16 08:20:43 -05:00
Thomas Boop
35dda19491 Add deprecation date and release 2.274.1 version (#796) 2020-11-09 09:01:47 -05:00
Julio Barba
36bdf50bc6 Prepare the release of 2.274.0 runner 2020-11-05 10:25:24 -05:00
Chris Gavin
95e2158dc6 Add an environment variable to indicate which repository the currently running Action came from. (#585)
* add `workflow_dispatch`

* Add an environment variable to indicate which repository the currently running Action came from.

* Expose the Action ref as well.

* Move setting `github.action_repository` and `github.action_ref` to `ActionRunner.cs`.

* Don't set `action_repository` and `action_ref` for local Actions.

Co-authored-by: Tingluo Huang <tingluohuang@github.com>
2020-11-03 14:39:17 -05:00
Jason Laqua
3ebaeb9f19 Fixes #759 doesn't change proxy environment variables (#760)
* Fixes #759 doesn't change proxy environment variables

* Update RunnerWebProxy.cs

* Update RunnerWebProxyL0.cs

Co-authored-by: Tingluo Huang <tingluohuang@github.com>
2020-11-03 10:47:30 -05:00
shinriyo
9d678cb270 DRY and add sudo (#687)
remove 3 "redundant" text and put one text for DRY.
and developers always forget `sudo` and annoying `Need to run with sudo privilege` message.
so, add first.
2020-11-02 21:38:35 -05:00
Tingluo Huang
27788491ea raise error for set-env, block set node_options. (#784)
* raise error for set-env, block set node_options.

* feedback.
2020-11-02 14:09:29 -05:00
Yashwanth Anantharaju
5ba7affea4 fix incorrect check (#778) 2020-10-30 14:34:00 -04:00
Robin Neatherway
ce92d7a6b5 Change ping .. > nul to sleep (#647)
* Change `ping .. > nul` to `sleep`

The filename `nul` is a Windows-ism that causes the update script to
create such a file in the current working directory. The `ping`
utility is also a dependency not installed by
`installdependencies.sh`, so it seemed easier to change it to the
standard `sleep` command.

* Update dotnet-install script as requested by test

* Update dotnet-install.ps1

Co-authored-by: Tingluo Huang <tingluohuang@github.com>
2020-10-29 10:15:30 -04:00
Temtaime
d23ca0ba7a Add .editorconfig (#768)
* Add .editorconfig

* Create .editorconfig
2020-10-27 10:50:50 -04:00
dependabot[bot]
9d1c81f018 Bump @actions/core in /src/Misc/expressionFunc/hashFiles (#729)
Bumps [@actions/core](https://github.com/actions/toolkit/tree/HEAD/packages/core) from 1.2.0 to 1.2.6.
- [Release notes](https://github.com/actions/toolkit/releases)
- [Changelog](https://github.com/actions/toolkit/blob/main/packages/core/RELEASES.md)
- [Commits](https://github.com/actions/toolkit/commits/HEAD/packages/core)

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2020-10-26 23:29:08 -04:00
Josh Soref
7a8abe726a Improve apt handling (#708)
* Unify apt/apt-get logic

The previous logic was buggy in that it tried to use `apt` in the `apt-get` branch after deciding that `apt` was unavailable...

* Prefer apt-get over apt

apt does not have a stable cli and using it from scripts yields annoying messages

* Improve English for missing apt-get & apt case

* Fix apt-get/apt fallback behavior for $ patterns

If there's a `$` in the apt install pattern, it will not fail if it selects a thing and decides it isn't interested in installing it.

* Fix spelling of libssl
2020-10-26 23:27:09 -04:00
Łukasz Łaniewski-Wołłk
a9135e61a0 Correcting bug in check of libicu presence (#695) 2020-10-26 23:14:17 -04:00
Justin Weissig
feafd3e1d7 fixed grammar issues (#672)
Nothing major here, just minor wording fixes.
2020-10-26 23:11:30 -04:00
Justin Weissig
dc3b2d3a36 fixed wording (#671)
Fixed a few minor grammar issues
2020-10-26 23:10:59 -04:00
Justin Weissig
a371309079 minor spelling & grammar tweaks (#670)
Fixed a few minor spelling & grammar issues.
2020-10-26 23:10:26 -04:00
Justin Weissig
5dd6bde4ca fixed minor spelling mistake (#669)
Changed enhancment to enhancement.
2020-10-26 23:09:38 -04:00
Tingluo Huang
c196103e58 update dotnet install script. 2020-10-26 23:07:57 -04:00
Fabian Mastenbroek
d55070da3e Update to .NET Core SDK 3.1.302 (#681)
This change updates the .NET Core SDK used by the Actions Runner to
version 3.1.302 to address the problems caused by the following issue:
    https://github.com/dotnet/runtime/issues/13475
See #574 for more information.

Fixes #574
2020-10-26 22:51:29 -04:00
Yashwanth Anantharaju
8279ae9a70 Support environment URL parsing (#762)
* environment URL parsing
2020-10-21 12:14:21 -04:00
Hayden Faulds
2e3b03623f log runner group name (#696)
* log runner group name

* linting
2020-10-16 14:56:06 +01:00
Thomas Boop
c18c8746db Release notes for 2.273.5 (#734) 2020-10-02 11:49:49 -04:00
Thomas Boop
6332a52d76 Notify on unsecure commands (#731)
* notify on unsecure commands
2020-10-02 11:34:37 -04:00
Yang Cao
8bb588bb69 Expose retention days in env for toolkit/artifacts package (#714) 2020-09-17 15:11:12 -04:00
David Kale
4510f69c73 Prepare 273.4 release 2020-09-17 18:19:42 +00:00
David Kale
c7b8552edf Prepare 2.273.3 release 2020-09-16 15:06:07 +00:00
Julio Barba
0face6e3af Preparing the release of 2.273.2 runner 2020-09-14 13:06:41 -04:00
eric sciple
306be41266 fix bug w checkout v1 updating GITHUB_WORKSPACE (#704) 2020-09-14 12:00:00 -04:00
David Kale
4e85b8f3b7 Allow registry credentials for job/service containers (#694)
* Log in with container credentials if given

* Stub in registry aware auth for later

* Fix hang if password is empty

* Remove default param to fix build

* PR Feedback. Add some tests and fix parse
2020-09-11 12:28:58 -04:00
Julio Barba
444332ca88 Prepare the release of 2.273.1 runner 2020-09-08 13:01:36 -04:00
Thomas Boop
e6eb9e381d Cleanup FileCommands (#693) 2020-09-04 15:35:36 -04:00
eric sciple
3a76a2e291 read env file (#683) 2020-08-29 23:18:35 -04:00
Thomas Boop
9976cb92a0 Add Runner File Commands (#684)
* Add File Runner Commands
2020-08-28 15:32:25 -04:00
Thomas Brumley
d900654c42 Add in Log line numbers for streaming logs (#663)
* Add in Log line

Co-authored-by: yaananth (Yash) <yaananth@github.com>
2020-08-25 12:02:29 -04:00
Julio Barba
65e3ec86b4 Set executable bit 2020-08-18 16:09:04 -04:00
Julio Barba
a7f205593a Update dotnet scripts 2020-08-18 16:03:06 -04:00
Julio Barba
55f60a4ffc Prepare the release of 2.273.0 runner 2020-08-17 15:41:15 -04:00
Ethan Chiu
ca13b25240 Fix Outputs Example (#658) 2020-08-14 11:55:27 -04:00
Timo Schilling
b0c2734380 fix endgroup maker (#640) 2020-08-14 11:53:30 -04:00
Ethan Chiu
9e7b56f698 Fix Null Ref Issues Composite Actions (#657) 2020-08-12 17:12:54 -04:00
Ethan Chiu
8c29e33e88 Fix DisplayName Changing in middle of composite action run (#645) 2020-08-10 16:26:23 -04:00
Ethan Chiu
976217d6ec Free up memory from step level outputs in composite action (#641) 2020-08-10 14:31:30 -04:00
Ethan Chiu
562eafab3a Adding Documentation to ADR for Support for Script Execution + Explicit Definition (#616) 2020-08-10 11:30:34 -04:00
Joe Bourne
9015b95a72 Updating virtual environment terminology (#651)
* Dropping pool terminology

* Update README.md
2020-08-07 15:52:56 -04:00
eric sciple
7d4bbf46de fix feature flag check; omit context for generated context names (#638) 2020-08-04 11:12:40 -04:00
Christopher Johnson
7b608e3e92 Adding help text for the new runnergroup feature (#626)
Co-authored-by: Christopher Johnson <thchrisjohnson@github.com>
2020-07-30 12:03:40 -04:00
Ethan Chiu
f028b4e2b0 Revert JobSteps to Queue Data Structure (#625)
* Revert JobSteps to Queue data structure

* Revert tests
2020-07-29 16:19:04 -04:00
TingluoHuang
38f816c2ae prepare release 2.272.0 runner. 2020-07-29 15:31:45 -04:00
efyx
bc1fe2cfe0 Fix poor performance of process spawned from svc daemon (#614) 2020-07-29 15:20:28 -04:00
Ethan Chiu
89a13db2c3 Remove TESTING_COMPOSITE_ACTIONS_ALPHA Env Variable (#624) 2020-07-29 15:12:15 -04:00
Ethan Chiu
d59092d973 GITHUB_ACTION_PATH + GITHUB_ACTION so that we can run scripts for Composite Run Steps (#615)
* Add environment variable for GITHUB_ACTION_PATH

* ah

* Remove debugging messages

* Set github action path at step level instead of global scope to avoid necessary removal

* Remove set context for github action

* Set github action path before and after composite action

* Copy the GitHub context, use this copied context for each composite step, and then set the action_path for each one (to avoid stamping over the parent GitHubContext pointer)
2020-07-29 14:28:14 -04:00
Ethan Chiu
855b90c3d4 Explicitly define what is allowed for a composite action (#605)
* Explicitly define what is allowed for an action

* Add step-env

* Remove secrets + defaults

* new line

* Add safety check to prevent from checking defaults in ScriptHandler for composite action

* Revert "Add safety check to prevent from checking defaults in ScriptHandler for composite action"

This reverts commit aeae15de7b.

* Need to explicitly use the ActionStep type since we need the .Inputs attribute, which is only found on ActionStep, not IStep

* Fix ActionManifestManager

* Remove todos

* Revert "Revert "Add safety check to prevent from checking defaults in ScriptHandler for composite action""

This reverts commit a22fcbc036.

* revert

* Remove needs in env

* Make shell required + add inputs

* Remove passing context to all composite steps attribute
2020-07-28 10:15:46 -04:00
Christopher Johnson
48ac96307c Add ability to register a runner to the non-default self-hosted runner group (#613)
Co-authored-by: Christopher Johnson <thchrisjohnson@github.com>
2020-07-23 17:46:48 -04:00
Ethan Chiu
2e50dffb37 Clean Up Composite UI (#610)
* Remove redundant code (display name is already evaluated in ActionRunner beforehand for each step)

* remove

* Remove nesting information for composite steps.

* put messages in debug logs if composite. if not, put these messages as outputs

* Fix group issue

* Fix end group issue
2020-07-23 09:45:00 -04:00
Tingluo Huang
e7b0844772 Add manual trigger 2020-07-23 00:34:36 -04:00
Ethan Chiu
d5a5550649 Fix Timeout-minutes for Whole Composite Action Step (#599)
* Exploring child Linked Cancellation Tokens

* Preliminary Timeout-minutes fix

* Final Solution for resolving cancellation token's timeout vs. cancellation

* Clean up + Fix error handling

* Use linked tokens instead

* Clean up

* one liner

* Remove JobExecutionContext => Replace with public Root accessor

* Move CreateLinkedTokenSource in the CreateCompositeStep Function
2020-07-22 18:01:50 -04:00
Ethan Chiu
3d0147d322 Improve Debugging Messages for Empty Tokens (#609)
* Improve Debugging Messages for Empty Tokens

* fix tests
2020-07-22 17:34:40 -04:00
Ethan Chiu
bd1f245aac Clarify details for defaults, shell, and working-dir (#607) 2020-07-22 16:11:55 -04:00
Steven Maude
005f1c15b1 Fix "propogate" typo in ADR 0549 (#600) 2020-07-22 16:10:57 -04:00
David Kale
da3cb5506f Fold logs for intermediate docker commands (#608) 2020-07-22 14:55:49 -04:00
dependabot[bot]
32d439070b Bump lodash in /src/Misc/expressionFunc/hashFiles (#603)
Bumps [lodash](https://github.com/lodash/lodash) from 4.17.15 to 4.17.19.
- [Release notes](https://github.com/lodash/lodash/releases)
- [Commits](https://github.com/lodash/lodash/compare/4.17.15...4.17.19)

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2020-07-20 10:21:01 -04:00
jeffrey
ec9f8f1682 dbl quotes around variable so CD works if path contains spaces (#602) 2020-07-20 10:19:37 -04:00
eric sciple
0921af735a move shared ExecutionContext properties under .Global (#594) 2020-07-19 19:05:47 -04:00
eric sciple
1cc3c08cf2 Prepare to switch GITHUB_ACTION to use ContextName instead of refname (#593)
This PR changes GITHUB_ACTION to use the step ContextName, instead of refname. The behavior is behind a feature flag. Refname is an otherwise deprecated property.

Primary motivation: For composite actions, we need a distinct GITHUB_ACTION for each nested step. This PR adds code to generate a default context name for nested steps.

For nested steps, GITHUB_ACTION will be set to "{ScopeName}.{ContextName}" to ensure no collisions.

A corresponding change will be made on the server so context name is never empty. Generated context names will start with "__".

A follow-up PR is required to avoid tracking "step" context values (outputs/conclusion/result) for generated context names. Waiting on telemetry from the server to confirm it's safe to assume a leading "__" is a generated context name.
2020-07-19 17:19:13 -04:00
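
The naming rule in this PR is small enough to state as code: for a nested composite step GITHUB_ACTION becomes "{ScopeName}.{ContextName}", while a top-level step keeps its plain context name. A sketch only; the class and method names are placeholders:

```csharp
static class GithubActionName
{
    // "{ScopeName}.{ContextName}" for nested composite steps; plain context name at the top level.
    public static string Build(string scopeName, string contextName) =>
        string.IsNullOrEmpty(scopeName) ? contextName : $"{scopeName}.{contextName}";
}
```
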
Ethan Chiu
f9dca15c63 Composite Run Steps Refactoring (#591)
* Add basic framework for baby steps runner

* Basic logic for adding steps / invoking composite action steps

* Composite Steps Runner MVP

* Fix null object reference error

* initialize composite

* Comment out code that is handled by stepsrunner

* Add composite clean up step

* Remove previous 'workarounds' from StepsRunner. Clean Up PR

* Remove todo

* Remove todo

* Fix using uninitialized object, yikes

* Remove time delay

* Format handler

* Move output handler into action handler

* Add try to evaluate display name

* Remove while loop yikes

* Abstract away the windows encoding check during step running

* Github context set to {ScopeName}.{ContextName} or {ContextName} if ScopeName is null

* Remove setting result to success since result defaults to success

* Fix windows error

* Fix windows

* revert:

* Windows fix

* Fix Windows Error in Abstraction

* Remove Composite Steps Runner => consolidate into Composite Steps Runner

* Remove unn. attribute in ExecutionContext

* Change protection levels, plus change function name to more clear meaning

* Remove location param

* location pt.2 fix

* Remove outputs step

* Remove temp directory

* new line

* Add ArgUtil not-null check

* better comment

* Change encoding name

* Check count > 0 for composite steps, import System.Threading

* Change function header encodingutil

* Add TODO

* Add await

* Handle Failed Step

* Move over SetAllCompositeOutputs to the handler

* Remove timeout-minutes setting in steps-level

* Use only ExecutionContext

* Move using to the top

* Remove redundant check

* Change function name

* Remove testing code

* Consolidate error code

* Consolidate code

* Change HandleOutput => ProcessCompositeActionOutputs

* Remove set the timeout comment

* Add Cancelling functionality + Remove unn. parameter
2020-07-17 16:31:48 -04:00
eric sciple
0877d9a533 Update StringUtil.cs 2020-07-16 10:30:42 -04:00
eric sciple
d5e40c6a60 Update 0549-composite-run-steps.md 2020-07-15 20:00:45 -04:00
eric sciple
391bc35bb9 Update 0549-composite-run-steps.md 2020-07-15 19:59:13 -04:00
eric sciple
e4267b8434 Update 0549-composite-run-steps.md 2020-07-15 19:57:22 -04:00
TingluoHuang
2709cbc0ea rename master to main. 2020-07-14 13:37:20 -04:00
Ethan Chiu
5e0cde8649 Composite Actions UI (#578)
* Composite Action Run Steps

* Env Flow => Able to get env variables and overwrite current env variables => but it doesn't 'stick'

* clean up

* Clean up trace messages + add Trace debug in ActionManager

* Add debugging message

* Optimize runtime of code

* Change String to string

* Add comma to Composite

* Change JobSteps to a List, Change Register Step function name

* Add TODO, remove unn. content

* Remove unnecessary code

* Fix unit tests

* Fix env format

* Remove comment

* Remove TODO message for context

* Add verbose trace logs which are only viewable by devs

* Initial Start for FileTable stuff

* Progress towards passing FileTable or FileID or FileName

* Sort usings in Composite Action Handler

* Change 0 to location

* Update context variables in composite action yaml

* Add helpful error message for null steps

* Pass fileID to all children token of root action token

* Change confusing term context => templateContext, Eliminate _fileTable and only use ExecutionContext.FileTable + update this table when need be

* Remove unnecessary FileID attribute from CompositeActionExecutionData

* Clean up file path for error message

* Remove todo

* Initial start/framework for output handling

* Outline different class vs Handler approach

* Remove InitializeScope

* Remove InitializeScope

* Fix Workflow Step Env overriding Parent Env

* First Approach for Attaching ID + Group ID to each Composite Action Step

* Add GroupID to the ActionDefinitionData

* starting foundation for handling clean up outputs step

* Pass outputs data to each composite action step to enable set-output functionality

* Create ScopeName for whole composite action.

This will enable us to add to the StepsContext[ScopeName] for the composite action which will allow us to use all these outputs in the cleanup step

* Hook up composite output step to handler => tmmrw implement composite output handler

* Add post composite action step to cleanup outputs => triggers composite output cleanup handler

* Fix Outputs Token handling start. Add individual step scope names.

* Set up Scope Name and Context Name naming system

* Figured out how to pass Parent Execution Context to clean up step

* Figured out how to pass Parent Execution Context and scope names to
clean up step

* Add GetOutput function for StepsContext

* Generate child scope name correctly if parent scope name is null

* Simplify InitializeScope()

* Outputs are set correctly and able to get all final outputs in handler

* Parse through Action Outputs

* Fix null ScopeName + ContextName in CompositeOutputHandler

* Shift over handling of Action Outputs to output handler

* First attempt to fix null retrievals for output variables

* Basic Support for Outputs Done.

* Clean up pt.1

* Refactor outputs to avoid using Action Reference

* Clean up code

* Clean up part 2

* Add clarifying comments for the output handler

* Remove TODO

* Remove env in composite action scope

* Clean up

* Revert back

* revert back

* add back envToken

* Remove unnecessary code

* Add file length check

* Clean up

* Fix logging issue

* Figure out how to handle set-env edge cases

* formatting

* fix unit tests

* Fix windows unit test syntax error

* Fix period

* Sanity check for fileTable add + remove unn. code

* revert back

* Add back line break

* Fix null errors

* Address situation if FileTable is null + add sanity check for adding file to fileTable

* add line

* Revert

* Fix unit tests to instantiate a FileTable

* Fix logic for trimming manifestfile path

* Add null check

* Revert

* Revert

* revert

* spacing

* Add filetable to testing file, remove ? since we know filetable should never be null

* Fix Throw logic

* Clarify template outputs token

* Add another type support for outputs to avoid container unit tests errors

* Add mapping for parity

* Build support for new outputs format

* Build support for new outputs format

* Refactor to avoid duplication of action yaml for workflow yaml

* revert

* revert

* revert

* spacing
2020-07-13 17:55:15 -04:00
Ethan Chiu
cb2b323781 Composite Run Steps Outputs (#568)
* Composite Action Run Steps

* Env Flow => Able to get env variables and overwrite current env variables => but it doesn't 'stick'

* clean up

* Clean up trace messages + add Trace debug in ActionManager

* Add debugging message

* Optimize runtime of code

* Change String to string

* Add comma to Composite

* Change JobSteps to a List, Change Register Step function name

* Add TODO, remove unn. content

* Remove unnecessary code

* Fix unit tests

* Fix env format

* Remove comment

* Remove TODO message for context

* Add verbose trace logs which are only viewable by devs

* Initial Start for FileTable stuff

* Progress towards passing FileTable or FileID or FileName

* Sort usings in Composite Action Handler

* Change 0 to location

* Update context variables in composite action yaml

* Add helpful error message for null steps

* Pass fileID to all children token of root action token

* Change confusing term context => templateContext, Eliminate _fileTable and only use ExecutionContext.FileTable + update this table when need be

* Remove unnecessary FileID attribute from CompositeActionExecutionData

* Clean up file path for error message

* Remove todo

* Initial start/framework for output handling

* Outline different class vs Handler approach

* Remove InitializeScope

* Remove InitializeScope

* Fix Workflow Step Env overriding Parent Env

* First Approach for Attaching ID + Group ID to each Composite Action Step

* Add GroupID to the ActionDefinitionData

* starting foundation for handling clean up outputs step

* Pass outputs data to each composite action step to enable set-output functionality

* Create ScopeName for whole composite action.

This will enable us to add to the StepsContext[ScopeName] for the composite action which will allow us to use all these outputs in the cleanup step

* Hook up composite output step to handler => tmmrw implement composite output handler

* Add post composite action step to cleanup outputs => triggers composite output cleanup handler

* Fix Outputs Token handling start. Add individual step scope names.

* Set up Scope Name and Context Name naming system

* Figured out how to pass Parent Execution Context to clean up step

* Figured out how to pass Parent Execution Context and scope names to
clean up step

* Add GetOutput function for StepsContext

* Generate child scope name correctly if parent scope name is null

* Simplify InitializeScope()

* Outputs are set correctly and able to get all final outputs in handler

* Parse through Action Outputs

* Fix null ScopeName + ContextName in CompositeOutputHandler

* Shift over handling of Action Outputs to output handler

* First attempt to fix null retrievals for output variables

* Basic Support for Outputs Done.

* Clean up pt.1

* Refactor outputs to avoid using Action Reference

* Clean up code

* Clean up part 2

* Add clarifying comments for the output handler

* Remove TODO

* Remove env in composite action scope

* Clean up

* Revert back

* revert back

* add back envToken

* Remove unnecessary code

* Add file length check

* Clean up

* Figure out how to handle set-env edge cases

* formatting

* fix unit tests

* Fix windows unit test syntax error

* Fix period

* Sanity check for fileTable add + remove unn. code

* revert back

* Add back line break

* Fix null errors

* Address situation if FileTable is null + add sanity check for adding file to fileTable

* add line

* Revert

* Fix unit tests to instantiate a FileTable

* Fix logic for trimming manifestfile path

* Add null check

* Revert

* Revert

* revert

* spacing

* Add filetable to testing file, remove ? since we know filetable should never be null

* Fix Throw logic

* Clarify template outputs token

* Add another type support for outputs to avoid container unit tests errors

* Add mapping for parity

* Build support for new outputs format

* Refactor to avoid duplication of action yaml for workflow yaml

* Move SDK work in ActionManifestManager, Condense Code

* Defer runs evaluation till after for loop to ensure order doesn't matter

* Fix logic error in setting scope and context names

* Add Regex + Add Child Context name null resolution

* move private function to bottom of class
2020-07-13 17:23:19 -04:00
Ethan Chiu
6c3958f365 Composite Run Steps ADR (#554)
* start

* Inputs + Outputs

* Clarify docs

* Finish Environment

* Add if condition

* Clarify language

* Update 0549-composite-run-steps.md

* timeout-minutes

* Finish

* add relevant example

* Fix syntax

* fix env example

* fix yaml syntax

* Update 0549-composite-run-steps.md

* Update file names, add more relevant example if condition

* Add note to continue-on-error

* Apply changes to If Condition

* bolding

* Update 0549-composite-run-steps.md

* Update 0549-composite-run-steps.md

* Update 0549-composite-run-steps.md

* Update 0549-composite-run-steps.md

* Syntax support + spacing

* Add guiding principles.

* Update 0549-composite-run-steps.md

* Reverse order.

* Update 0549-composite-run-steps.md

* change from job to step

* Update 0549-composite-run-steps.md

* Update 0549-composite-run-steps.md

* Update 0549-composite-run-steps.md

* Add Secrets

* Update 0549-composite-run-steps.md

* Update 0549-composite-run-steps.md

* Fix output example

* Fix output example

* Fix action examples to use using.

* fix output variable name

* update workingDir + env

* Defaults + continue-on-error

* Update Outputs Section

* Eliminate Env

* Secrets

* Update timeout-minutes

* Update 0549-composite-run-steps.md

* Update 0549-composite-run-steps.md

* Fix example.

* Remove TODOs

* Update 0549-composite-run-steps.md

* Update 0549-composite-run-steps.md
2020-07-13 12:30:31 -04:00
Ethan Chiu
9d7bd4706b Improve Error Messaging for Actions by Using ExecutionContext's FileTable as Single Source of Truth and by Passing FileID to All Children Tokens. (#564)
* Composite Action Run Steps

* Env Flow => Able to get env variables and overwrite current env variables => but it doesn't 'stick'

* clean up

* Clean up trace messages + add Trace debug in ActionManager

* Add debugging message

* Optimize runtime of code

* Change String to string

* Add comma to Composite

* Change JobSteps to a List, Change Register Step function name

* Add TODO, remove unn. content

* Remove unnecessary code

* Fix unit tests

* Fix env format

* Remove comment

* Remove TODO message for context

* Add verbose trace logs which are only viewable by devs

* Initial Start for FileTable stuff

* Progress towards passing FileTable or FileID or FileName

* Sort usings in Composite Action Handler

* Change 0 to location

* Update context variables in composite action yaml

* Add helpful error message for null steps

* Pass fileID to all children token of root action token

* Change confusing term context => templateContext, Eliminate _fileTable and only use ExecutionContext.FileTable + update this table when need be

* Remove unnecessary FileID attribute from CompositeActionExecutionData

* Clean up file path for error message

* Remove todo

* Fix Workflow Step Env overriding Parent Env

* Remove env in composite action scope

* Clean up

* Revert back

* revert back

* add back envToken

* Remove unnecessary code

* Add file length check

* Clean up

* Figure out how to handle set-env edge cases

* formatting

* fix unit tests

* Fix windows unit test syntax error

* Fix period

* Sanity check for fileTable add + remove unn. code

* revert back

* Add back line break

* Fix null errors

* Address situation if FileTable is null + add sanity check for adding file to fileTable

* add line

* Revert

* Fix unit tests to instantiate a FileTable

* Fix logic for trimming manifestfile path

* Add null check

* Add filetable to testing file, remove ? since we know filetable should never be null
2020-07-08 17:15:16 -04:00
Ethan Chiu
5822a38c39 Add bash command for running custom runner (#569) 2020-07-08 11:20:38 -04:00
Ethan Chiu
d42c9da2d7 Composite Actions: Support Env Flow (#557)
* Composite Action Run Steps

* Env Flow => Able to get env variables and overwrite current env variables => but it doesn't 'stick'

* clean up

* Clean up trace messages + add Trace debug in ActionManager

* Add debugging message

* Optimize runtime of code

* Change String to string

* Add comma to Composite

* Change JobSteps to a List, Change Register Step function name

* Add TODO, remove unn. content

* Remove unnecessary code

* Fix unit tests

* Fix env format

* Remove comment

* Remove TODO message for context

* Add verbose trace logs which are only viewable by devs

* Sort usings in Composite Action Handler

* Change 0 to location

* Update context variables in composite action yaml

* Add helpful error message for null steps

* Fix Workflow Step Env overriding Parent Env

* Remove env in composite action scope

* Clean up

* Revert back

* revert back

* add back envToken

* Remove unnecessary code

* Figure out how to handle set-env edge cases

* formatting

* fix unit tests

* Fix windows unit test syntax error
2020-07-08 10:16:51 -04:00
eric sciple
121deedeb5 Fix trailing '.0' for Int64 values (#572) 2020-06-30 17:25:47 -04:00
Ethan Chiu
a0942ed345 Composite Actions Support for Multiple Run Steps (#549)
* Composite Action Run Steps

* Clean up trace messages + add Trace debug in ActionManager

* Change String to string

* Add comma to Composite

* Change JobSteps to a List, Change Register Step function name

* Add TODO, remove unn. content

* Remove unnecessary code

* Fix unit tests

* Add verbose trace logs which are only viewable by devs

* Sort usings in Composite Action Handler

* Change 0 to location

* Update context variables in composite action yaml

* Add helpful error message for null steps
2020-06-23 15:35:32 -04:00
TingluoHuang
7cef9a27ca release 2.267.0 runner. 2020-06-23 14:05:28 -04:00
Tingluo Huang
df7e16954e print runner and machine name to log. (#539) 2020-06-23 13:57:37 -04:00
eric sciple
4e7d27a53c remove temporary logic when resolving action download info (#550) 2020-06-15 13:13:47 -04:00
Lokesh Gopu
89d1418e48 Update exception message (#540) 2020-06-11 17:25:50 -04:00
Tingluo Huang
e728b8594d fix race condition. (#538) 2020-06-11 16:17:24 -04:00
Tingluo Huang
de4490d06d Restore SELinux context on service file when SELinux is enabled (#525) 2020-06-11 15:40:09 -04:00
Tingluo Huang
2e800f857e Skip search $PATH on command with fully qualified path (#526) 2020-06-11 13:52:42 -04:00
Tingluo Huang
312c7668a8 Fix DataContract with Token service (#532) 2020-06-11 12:11:35 -04:00
Tingluo Huang
eaf39bb058 add libicu66 for Ubuntu 20.04 (#535) 2020-06-11 12:11:13 -04:00
eric sciple
5815819f24 Resolve action download info (#515) 2020-06-09 08:53:28 -04:00
Ethan Chiu
1aea046932 Add substep for developer flow for clarity (#523) 2020-06-08 10:47:58 -04:00
Ethan Chiu
eda463601c Update Links and Language to Git + VSCode (#522) 2020-06-08 10:19:17 -04:00
Nick Fields
f994ae0542 Reduce input validation warnings (#506)
* Only raise a single warning for unexpected inputs

* Update invalid input test to raise single warning
2020-06-05 23:09:14 -04:00
Tingluo Huang
3c5aef791c Fix null ref exception in SecretMasker caused by hashfiles timeout. (#516) 2020-06-05 23:02:10 -04:00
Tingluo Huang
c4626d0c3a Remove SPS/Token migration code. Remove GHES URL manipulation code. (#513)
* Remove SPS/Token migration code. Remove GHES URL manipulation code.

* feedback.
2020-06-03 23:24:53 -04:00
eric sciple
416a7ac4b8 prepare to switch to service resolves archive download info (#508) 2020-06-02 17:21:50 -04:00
TingluoHuang
11435857e4 prepare 2.263.0 runner release. 2020-05-21 15:49:10 -04:00
Tingluo Huang
6f260012a3 Fix inputs validation warning, fix post step display name, fix worker crash due to error in step.env (#490) 2020-05-21 11:09:50 -04:00
eric sciple
4fc87ddfc6 fix problem matcher for GHES (#488) 2020-05-19 16:15:03 -04:00
eric sciple
b45c1b9440 switch GITHUB_URL to GITHUB_SERVER_URL (#482) 2020-05-18 13:02:30 -04:00
Quan TRAN
73307c0a30 jq returns "null" if the field does not exist (#478) 2020-05-14 11:09:20 -04:00
Tingluo Huang
cd8e4ddba1 Validate inputs only for repo action, no warning for small delay. (#476)
* validate inputs only for repo action, no warning for small delay.

* l0
2020-05-12 16:09:13 -04:00
Tingluo Huang
abf59bdcb6 Fix configure as service with runner name has space. (#474) 2020-05-12 16:08:50 -04:00
Brian Cristante
09cf59c1e0 Use an env var to point to an Actions Service dev instance (#468)
* Use an environment variable for the testing backdoor

* Make the env var a boolean

We'll still have to pass the URL on the command line

* Empty commit to rerun CI
2020-05-11 17:14:02 -04:00
TingluoHuang
7a65236022 prepare 2.262.0 runner release. 2020-05-11 15:05:59 -04:00
David Kale
462b5117c8 docker build using -f instead of implied default (#471)
* pass -f to docker build

* Wrong place

* build path

* Also pass docker context path

* Tidy up format

* PR Feedback
2020-05-11 13:57:31 -04:00
Tingluo Huang
6922f3cb86 sps/token migration tweak, ActionResult casing. (#462) 2020-05-11 12:36:35 -04:00
Tingluo Huang
911135e66c add help info for '--labels' (#472) 2020-05-11 12:36:16 -04:00
PJ Quirk
01c9a8a8af Remove community actions munging and add dotcom fallback for downloading actions (#469)
* Fallback to dotcom rather than munged community actions

* Encapsulate the link and the auth details

* Rename the method to be clearer

* Remove BOM
2020-05-11 12:23:02 -04:00
eric sciple
33d2d2c328 update checkout@v1 for GHES (#470) 2020-05-11 11:41:16 -04:00
Justin Hutchings
a246b3b29d Add CodeQL Analysis workflow (#459)
* Add CodeQL Analysis workflow

* Fix path

* Add manual build step

Import manual build step from build.yml
2020-05-07 11:17:34 -04:00
Brian Cristante
c7768d4a7b Adapt create-latest-svc.sh to work with GHES (#452)
* Add README

* Adapt create-latest-svc to work with GHES

* Fix inverted conditions
2020-04-27 23:53:53 -04:00
Tingluo Huang
70729fb3c4 Help trace worker crash in Kusto. (#450)
* Help trace worker crash in Kusto.

* more

* feedback.
2020-04-27 23:44:17 -04:00
Tingluo Huang
1470a3b6e2 better error when runner removed from service. (#441) 2020-04-27 23:02:00 -04:00
eric sciple
2fadf430e4 post-alpha fixes for github.url github.api_url and github.graphql_url (#451) 2020-04-24 13:38:59 -04:00
PJ Quirk
f798f5606b Use the API_URL and munge action URLs for GHES (#437)
* First pass at logic for GHES, not all correct

* Need to mock out file downloading

* Allowed for mocking of HTTP responses

* Added test for builtin GHES action download

* More tests

* Don't retry on action 404

* Remove commented out code

* Add a using statement back, because Windows

* Make windows happy again

* Another windows fix

* Always delete the cache since it isn't fully implemented

* Use RunnerService base class

* Add examples, update URL path

* Remove forceDotCom

* Fix a bug

* Remove a test that's no longer relevant

* PR feedback

* Add missing return

* More trace info

* Use the new agreed-upon format

* Use the auth token since we're hitting GHES directly

* Fixing tests on windows

* Fixed one more test
2020-04-23 17:02:13 -04:00
Tingluo Huang
3f7a01af93 add secret masker for trimming double quotes. (#440) 2020-04-21 22:07:55 -04:00
Tingluo Huang
d5c54f9819 Raise warning when action input does not match action.yml. (#429) 2020-04-21 19:42:53 -04:00
Mathias LANG
9f78ad3b34 Make release notes code blocks copy-paste-able (#430)
The release notes used C-style comments (`//`) instead of shell-style (`#`),
causing the code snippet to not be easily copy-paste-able.

Additionally, the code fence for Windows had the extra `powershell` added,
as well as a comment to direct users towards `powershell` over `cmd`.
2020-04-19 22:11:44 -04:00
Jan Pazdziora
97883c8cd5 Fix spelling of RHEL and CentOS. (#436) 2020-04-19 16:53:06 -04:00
Tingluo Huang
c5fa9fb062 Print node version in debug instead of output. (#433) 2020-04-17 11:53:51 -04:00
Bryan MacFarlane
b2dcdc21dc Sample scripts to automate scalable runners (#427) 2020-04-17 11:08:45 -04:00
289 changed files with 22244 additions and 6545 deletions

.editorconfig

@@ -0,0 +1,8 @@
# https://editorconfig.org/
[*]
insert_final_newline = true # ensure all files end with a single newline
trim_trailing_whitespace = true # attempt to remove trailing whitespace on save
[*.md]
trim_trailing_whitespace = false # in markdown, "two trailing spaces" is unfortunately meaningful; it means `<br>`


@@ -1,12 +1,18 @@
---
name: Bug report
about: Create a report to help us improve
name: 🛑 Report a bug in the runner application
about: If you have issues with GitHub Actions, please follow the "support for GitHub Actions" link, below.
title: ''
labels: bug
assignees: ''
---
<!--
👋 You're opening a bug report against the GitHub Actions **runner application**.
🛑 Please stop if you're not certain that the bug you're seeing is in the runner application - if you have general problems with actions, workflows, or runners, please see the [GitHub Community Support Forum](https://github.community/c/code-to-cloud/52) which is actively monitored. Using the forum ensures that we route your problem to the correct team. 😃
-->
**Describe the bug**
A clear and concise description of what the bug is.

.github/ISSUE_TEMPLATE/config.yml

@@ -0,0 +1,11 @@
blank_issues_enabled: false
contact_links:
- name: ✅ Support for GitHub Actions
url: https://github.community/c/code-to-cloud/52
about: If you have questions about GitHub Actions or need support writing workflows, please ask in the GitHub Community Support forum.
- name: ✅ Feedback and suggestions for GitHub Actions
url: https://github.com/github/feedback/discussions/categories/actions-and-packages-feedback
about: If you have feedback or suggestions about GitHub Actions, please open a discussion (or add to an existing one) in the GitHub Actions Feedback. GitHub Actions Product Managers and Engineers monitor the feedback forum.
- name: ‼️ GitHub Security Bug Bounty
url: https://bounty.github.com/
about: Please report security vulnerabilities here.


@@ -1,19 +1,24 @@
---
name: Feature Request
about: Create a request to help us improve
name: 🛑 Request a feature in the runner application
about: If you have feature requests for GitHub Actions, please use the "feedback and suggestions for GitHub Actions" link below.
title: ''
labels: enhancement
assignees: ''
---
Thank you 🙇‍♀ for wanting to create a feature in this repository. Before you do, please ensure you are filing the issue in the right place. Issues should only be opened on if the issue **relates to code in this repository**.
<!--
👋 You're opening a request for an enhancement in the GitHub Actions **runner application**.
🛑 Please stop if you're not certain that the feature you want is in the runner application - if you have a suggestion for improving GitHub Actions, please see the [GitHub Actions Feedback](https://github.com/github/feedback/discussions/categories/actions-and-packages-feedback) discussion forum which is actively monitored. Using the forum ensures that we route your problem to the correct team. 😃
Some additional useful links:
* If you have found a security issue [please submit it here](https://hackerone.com/github)
* If you have questions or issues with the service, writing workflows or actions, then please [visit the GitHub Community Forum's Actions Board](https://github.community/t5/GitHub-Actions/bd-p/actions)
* If you are having an issue or question about GitHub Actions then please [contact customer support](https://help.github.com/en/actions/automating-your-workflow-with-github-actions/about-github-actions#contacting-support)
* If you are having an issue or have a question about GitHub Actions then please [contact customer support](https://help.github.com/en/actions/automating-your-workflow-with-github-actions/about-github-actions#contacting-support)
If you have a feature request that is relevant to this repository, the runner, then please include the information below:
-->
**Describe the enhancement**
A clear and concise description of the feature or enhancement you need.

View File

@@ -1,9 +1,10 @@
name: Runner CI
on:
workflow_dispatch:
push:
branches:
- master
- main
- releases/*
paths-ignore:
- '**.md'
@@ -17,7 +18,7 @@ jobs:
build:
strategy:
matrix:
runtime: [ linux-x64, linux-arm64, linux-arm, win-x64, osx-x64 ]
runtime: [ linux-x64, linux-arm64, linux-arm, win-x64, osx-x64, osx-arm64 ]
include:
- runtime: linux-x64
os: ubuntu-latest
@@ -35,13 +36,17 @@ jobs:
os: macOS-latest
devScript: ./dev.sh
- runtime: osx-arm64
os: macOS-latest
devScript: ./dev.sh
- runtime: win-x64
os: windows-latest
os: windows-2019
devScript: ./dev
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v3
# Build runner layout
- name: Build & Layout Release
@@ -49,12 +54,35 @@ jobs:
${{ matrix.devScript }} layout Release ${{ matrix.runtime }}
working-directory: src
# Check runtime/externals hash
- name: Compute/Compare runtime and externals Hash
shell: bash
run: |
echo "Current dotnet runtime hash result: $DOTNET_RUNTIME_HASH"
echo "Current Externals hash result: $EXTERNALS_HASH"
NeedUpdate=0
if [ "$EXTERNALS_HASH" != "$(cat ./src/Misc/contentHash/externals/${{ matrix.runtime }})" ] ;then
echo Hash mismatch, Update ./src/Misc/contentHash/externals/${{ matrix.runtime }} to $EXTERNALS_HASH
NeedUpdate=1
fi
if [ "$DOTNET_RUNTIME_HASH" != "$(cat ./src/Misc/contentHash/dotnetRuntime/${{ matrix.runtime }})" ] ;then
echo Hash mismatch, Update ./src/Misc/contentHash/dotnetRuntime/${{ matrix.runtime }} to $DOTNET_RUNTIME_HASH
NeedUpdate=1
fi
exit $NeedUpdate
env:
DOTNET_RUNTIME_HASH: ${{hashFiles('**/_layout_trims/runtime/**/*')}}
EXTERNALS_HASH: ${{hashFiles('**/_layout_trims/externals/**/*')}}
# Run tests
- name: L0
run: |
${{ matrix.devScript }} test
working-directory: src
if: matrix.runtime != 'linux-arm64' && matrix.runtime != 'linux-arm'
if: matrix.runtime != 'linux-arm64' && matrix.runtime != 'linux-arm' && matrix.runtime != 'osx-arm64'
# Create runner package tar.gz/zip
- name: Package Release
@@ -66,7 +94,11 @@ jobs:
# Upload runner package tar.gz/zip as artifact
- name: Publish Artifact
if: github.event_name != 'pull_request'
uses: actions/upload-artifact@v1
uses: actions/upload-artifact@v2
with:
name: runner-package-${{ matrix.runtime }}
path: _package
path: |
_package
_package_trims/trim_externals
_package_trims/trim_runtime
_package_trims/trim_runtime_externals
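The hash-comparison step added above follows a simple pattern: compute a hash of the generated content with `hashFiles()`, compare it against a checked-in reference value, and fail the job on a mismatch so the reference has to be updated deliberately. A stripped-down sketch of that pattern (the paths and file names below are illustrative, not the repository's real ones):

```yaml
# Minimal sketch of the compare-and-fail hash check; paths are illustrative.
- name: Verify generated content hash
  shell: bash
  env:
    CURRENT_HASH: ${{ hashFiles('**/generated/**/*') }}
  run: |
    expected="$(cat ./hashes/generated.sha256)"
    if [ "$CURRENT_HASH" != "$expected" ]; then
      echo "Hash mismatch, update ./hashes/generated.sha256 to $CURRENT_HASH"
      exit 1
    fi
```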

41
.github/workflows/codeql.yml vendored Normal file
View File

@@ -0,0 +1,41 @@
name: "Code Scanning - Action"
permissions:
security-events: write
on:
push:
branches:
- main
pull_request:
schedule:
- cron: '0 0 * * 0'
jobs:
CodeQL-Build:
strategy:
fail-fast: false
# CodeQL runs on ubuntu-latest, windows-latest, and macos-latest
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
# Override language selection by uncommenting this and choosing your languages
# with:
# languages: go, javascript, csharp, python, cpp, java
- name: Manual build
run : |
./dev.sh layout Release linux-x64
working-directory: src
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
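For reference, CodeQL also ships an autobuild step. The workflow above builds the runner manually with its own layout script, but for repositories where automatic build detection works, the "Manual build" step could in principle be swapped for something like this sketch:

```yaml
# Hypothetical alternative to the manual build step; not used by this workflow.
- name: Autobuild
  uses: github/codeql-action/autobuild@v1
```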

View File

@@ -1,16 +1,17 @@
name: Runner CD
on:
workflow_dispatch:
push:
paths:
- releaseVersion
jobs:
check:
if: startsWith(github.ref, 'refs/heads/releases/') || github.ref == 'refs/heads/master'
if: startsWith(github.ref, 'refs/heads/releases/') || github.ref == 'refs/heads/main'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
# Make sure ./releaseVersion match ./src/runnerversion
# Query GitHub release ensure version is not used
@@ -44,9 +45,34 @@ jobs:
build:
needs: check
outputs:
linux-x64-sha: ${{ steps.sha.outputs.linux-x64-sha256 }}
linux-arm64-sha: ${{ steps.sha.outputs.linux-arm64-sha256 }}
linux-arm-sha: ${{ steps.sha.outputs.linux-arm-sha256 }}
win-x64-sha: ${{ steps.sha.outputs.win-x64-sha256 }}
osx-x64-sha: ${{ steps.sha.outputs.osx-x64-sha256 }}
osx-arm64-sha: ${{ steps.sha.outputs.osx-arm64-sha256 }}
linux-x64-sha-noexternals: ${{ steps.sha_noexternals.outputs.linux-x64-sha256 }}
linux-arm64-sha-noexternals: ${{ steps.sha_noexternals.outputs.linux-arm64-sha256 }}
linux-arm-sha-noexternals: ${{ steps.sha_noexternals.outputs.linux-arm-sha256 }}
win-x64-sha-noexternals: ${{ steps.sha_noexternals.outputs.win-x64-sha256 }}
osx-x64-sha-noexternals: ${{ steps.sha_noexternals.outputs.osx-x64-sha256 }}
osx-arm64-sha-noexternals: ${{ steps.sha_noexternals.outputs.osx-arm64-sha256 }}
linux-x64-sha-noruntime: ${{ steps.sha_noruntime.outputs.linux-x64-sha256 }}
linux-arm64-sha-noruntime: ${{ steps.sha_noruntime.outputs.linux-arm64-sha256 }}
linux-arm-sha-noruntime: ${{ steps.sha_noruntime.outputs.linux-arm-sha256 }}
win-x64-sha-noruntime: ${{ steps.sha_noruntime.outputs.win-x64-sha256 }}
osx-x64-sha-noruntime: ${{ steps.sha_noruntime.outputs.osx-x64-sha256 }}
osx-arm64-sha-noruntime: ${{ steps.sha_noruntime.outputs.osx-arm64-sha256 }}
linux-x64-sha-noruntime-noexternals: ${{ steps.sha_noruntime_noexternals.outputs.linux-x64-sha256 }}
linux-arm64-sha-noruntime-noexternals: ${{ steps.sha_noruntime_noexternals.outputs.linux-arm64-sha256 }}
linux-arm-sha-noruntime-noexternals: ${{ steps.sha_noruntime_noexternals.outputs.linux-arm-sha256 }}
win-x64-sha-noruntime-noexternals: ${{ steps.sha_noruntime_noexternals.outputs.win-x64-sha256 }}
osx-x64-sha-noruntime-noexternals: ${{ steps.sha_noruntime_noexternals.outputs.osx-x64-sha256 }}
osx-arm64-sha-noruntime-noexternals: ${{ steps.sha_noruntime_noexternals.outputs.osx-arm64-sha256 }}
strategy:
matrix:
runtime: [ linux-x64, linux-arm64, linux-arm, win-x64, osx-x64 ]
runtime: [ linux-x64, linux-arm64, linux-arm, win-x64, osx-x64, osx-arm64 ]
include:
- runtime: linux-x64
os: ubuntu-latest
@@ -64,13 +90,17 @@ jobs:
os: macOS-latest
devScript: ./dev.sh
- runtime: osx-arm64
os: macOS-latest
devScript: ./dev.sh
- runtime: win-x64
os: windows-latest
os: windows-2019
devScript: ./dev
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v3
# Build runner layout
- name: Build & Layout Release
@@ -78,13 +108,6 @@ jobs:
${{ matrix.devScript }} layout Release ${{ matrix.runtime }}
working-directory: src
# Run tests
- name: L0
run: |
${{ matrix.devScript }} test
working-directory: src
if: matrix.runtime != 'linux-arm64' && matrix.runtime != 'linux-arm'
# Create runner package tar.gz/zip
- name: Package Release
if: github.event_name != 'pull_request'
@@ -92,21 +115,110 @@ jobs:
${{ matrix.devScript }} package Release ${{ matrix.runtime }}
working-directory: src
# compute shas and set as job outputs to use in release notes
- run: brew install coreutils #needed for shasum util
if: ${{ matrix.os == 'macOS-latest' }}
name: Install Dependencies for SHA Calculation (osx)
- run: |
file=$(ls)
sha=$(sha256sum $file | awk '{ print $1 }')
echo "Computed sha256: $sha for $file"
echo "::set-output name=${{matrix.runtime}}-sha256::$sha"
shell: bash
id: sha
name: Compute SHA256
working-directory: _package
- run: |
file=$(ls)
sha=$(sha256sum $file | awk '{ print $1 }')
echo "Computed sha256: $sha for $file"
echo "::set-output name=${{matrix.runtime}}-sha256::$sha"
echo "::set-output name=sha256::$sha"
shell: bash
id: sha_noexternals
name: Compute SHA256
working-directory: _package_trims/trim_externals
- run: |
file=$(ls)
sha=$(sha256sum $file | awk '{ print $1 }')
echo "Computed sha256: $sha for $file"
echo "::set-output name=${{matrix.runtime}}-sha256::$sha"
echo "::set-output name=sha256::$sha"
shell: bash
id: sha_noruntime
name: Compute SHA256
working-directory: _package_trims/trim_runtime
- run: |
file=$(ls)
sha=$(sha256sum $file | awk '{ print $1 }')
echo "Computed sha256: $sha for $file"
echo "::set-output name=${{matrix.runtime}}-sha256::$sha"
echo "::set-output name=sha256::$sha"
shell: bash
id: sha_noruntime_noexternals
name: Compute SHA256
working-directory: _package_trims/trim_runtime_externals
- name: Create trimmedpackages.json for ${{ matrix.runtime }}
if: matrix.runtime == 'win-x64'
uses: actions/github-script@0.3.0
with:
github-token: ${{secrets.GITHUB_TOKEN}}
script: |
const core = require('@actions/core')
const fs = require('fs');
const runnerVersion = fs.readFileSync('src/runnerversion', 'utf8').replace(/\n$/g, '')
var trimmedPackages = fs.readFileSync('src/Misc/trimmedpackages_zip.json', 'utf8').replace(/<RUNNER_VERSION>/g, runnerVersion).replace(/<RUNNER_PLATFORM>/g, '${{ matrix.runtime }}')
trimmedPackages = trimmedPackages.replace(/<RUNTIME_HASH>/g, '${{hashFiles('**/_layout_trims/runtime/**/*')}}')
trimmedPackages = trimmedPackages.replace(/<EXTERNALS_HASH>/g, '${{hashFiles('**/_layout_trims/externals/**/*')}}')
trimmedPackages = trimmedPackages.replace(/<NO_RUNTIME_EXTERNALS_HASH>/g, '${{steps.sha_noruntime_noexternals.outputs.sha256}}')
trimmedPackages = trimmedPackages.replace(/<NO_RUNTIME_HASH>/g, '${{steps.sha_noruntime.outputs.sha256}}')
trimmedPackages = trimmedPackages.replace(/<NO_EXTERNALS_HASH>/g, '${{steps.sha_noexternals.outputs.sha256}}')
console.log(trimmedPackages)
fs.writeFileSync('${{ matrix.runtime }}-trimmedpackages.json', trimmedPackages)
- name: Create trimmedpackages.json for ${{ matrix.runtime }}
if: matrix.runtime != 'win-x64'
uses: actions/github-script@0.3.0
with:
github-token: ${{secrets.GITHUB_TOKEN}}
script: |
const core = require('@actions/core')
const fs = require('fs');
const runnerVersion = fs.readFileSync('src/runnerversion', 'utf8').replace(/\n$/g, '')
var trimmedPackages = fs.readFileSync('src/Misc/trimmedpackages_targz.json', 'utf8').replace(/<RUNNER_VERSION>/g, runnerVersion).replace(/<RUNNER_PLATFORM>/g, '${{ matrix.runtime }}')
trimmedPackages = trimmedPackages.replace(/<RUNTIME_HASH>/g, '${{hashFiles('**/_layout_trims/runtime/**/*')}}')
trimmedPackages = trimmedPackages.replace(/<EXTERNALS_HASH>/g, '${{hashFiles('**/_layout_trims/externals/**/*')}}')
trimmedPackages = trimmedPackages.replace(/<NO_RUNTIME_EXTERNALS_HASH>/g, '${{steps.sha_noruntime_noexternals.outputs.sha256}}')
trimmedPackages = trimmedPackages.replace(/<NO_RUNTIME_HASH>/g, '${{steps.sha_noruntime.outputs.sha256}}')
trimmedPackages = trimmedPackages.replace(/<NO_EXTERNALS_HASH>/g, '${{steps.sha_noexternals.outputs.sha256}}')
console.log(trimmedPackages)
fs.writeFileSync('${{ matrix.runtime }}-trimmedpackages.json', trimmedPackages)
# Upload runner package tar.gz/zip as artifact.
# Since each package name is unique, we don't need to put ${{matrix}} info into the artifact name
- name: Publish Artifact
if: github.event_name != 'pull_request'
uses: actions/upload-artifact@v1
uses: actions/upload-artifact@v2
with:
name: runner-packages
path: _package
path: |
_package
_package_trims/trim_externals
_package_trims/trim_runtime
_package_trims/trim_runtime_externals
${{ matrix.runtime }}-trimmedpackages.json
release:
needs: build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
# Download runner package tar.gz/zip produced by 'build' job
- name: Download Artifact
@@ -125,11 +237,46 @@ jobs:
const core = require('@actions/core')
const fs = require('fs');
const runnerVersion = fs.readFileSync('${{ github.workspace }}/src/runnerversion', 'utf8').replace(/\n$/g, '')
const releaseNote = fs.readFileSync('${{ github.workspace }}/releaseNote.md', 'utf8').replace(/<RUNNER_VERSION>/g, runnerVersion)
var releaseNote = fs.readFileSync('${{ github.workspace }}/releaseNote.md', 'utf8').replace(/<RUNNER_VERSION>/g, runnerVersion)
releaseNote = releaseNote.replace(/<WIN_X64_SHA>/g, '${{needs.build.outputs.win-x64-sha}}')
releaseNote = releaseNote.replace(/<OSX_X64_SHA>/g, '${{needs.build.outputs.osx-x64-sha}}')
releaseNote = releaseNote.replace(/<OSX_ARM64_SHA>/g, '${{needs.build.outputs.osx-arm64-sha}}')
releaseNote = releaseNote.replace(/<LINUX_X64_SHA>/g, '${{needs.build.outputs.linux-x64-sha}}')
releaseNote = releaseNote.replace(/<LINUX_ARM_SHA>/g, '${{needs.build.outputs.linux-arm-sha}}')
releaseNote = releaseNote.replace(/<LINUX_ARM64_SHA>/g, '${{needs.build.outputs.linux-arm64-sha}}')
releaseNote = releaseNote.replace(/<WIN_X64_SHA_NOEXTERNALS>/g, '${{needs.build.outputs.win-x64-sha-noexternals}}')
releaseNote = releaseNote.replace(/<OSX_X64_SHA_NOEXTERNALS>/g, '${{needs.build.outputs.osx-x64-sha-noexternals}}')
releaseNote = releaseNote.replace(/<OSX_ARM64_SHA_NOEXTERNALS>/g, '${{needs.build.outputs.osx-arm64-sha-noexternals}}')
releaseNote = releaseNote.replace(/<LINUX_X64_SHA_NOEXTERNALS>/g, '${{needs.build.outputs.linux-x64-sha-noexternals}}')
releaseNote = releaseNote.replace(/<LINUX_ARM_SHA_NOEXTERNALS>/g, '${{needs.build.outputs.linux-arm-sha-noexternals}}')
releaseNote = releaseNote.replace(/<LINUX_ARM64_SHA_NOEXTERNALS>/g, '${{needs.build.outputs.linux-arm64-sha-noexternals}}')
releaseNote = releaseNote.replace(/<WIN_X64_SHA_NORUNTIME>/g, '${{needs.build.outputs.win-x64-sha-noruntime}}')
releaseNote = releaseNote.replace(/<OSX_X64_SHA_NORUNTIME>/g, '${{needs.build.outputs.osx-x64-sha-noruntime}}')
releaseNote = releaseNote.replace(/<OSX_ARM64_SHA_NORUNTIME>/g, '${{needs.build.outputs.osx-arm64-sha-noruntime}}')
releaseNote = releaseNote.replace(/<LINUX_X64_SHA_NORUNTIME>/g, '${{needs.build.outputs.linux-x64-sha-noruntime}}')
releaseNote = releaseNote.replace(/<LINUX_ARM_SHA_NORUNTIME>/g, '${{needs.build.outputs.linux-arm-sha-noruntime}}')
releaseNote = releaseNote.replace(/<LINUX_ARM64_SHA_NORUNTIME>/g, '${{needs.build.outputs.linux-arm64-sha-noruntime}}')
releaseNote = releaseNote.replace(/<WIN_X64_SHA_NORUNTIME_NOEXTERNALS>/g, '${{needs.build.outputs.win-x64-sha-noruntime-noexternals}}')
releaseNote = releaseNote.replace(/<OSX_X64_SHA_NORUNTIME_NOEXTERNALS>/g, '${{needs.build.outputs.osx-x64-sha-noruntime-noexternals}}')
releaseNote = releaseNote.replace(/<OSX_ARM64_SHA_NORUNTIME_NOEXTERNALS>/g, '${{needs.build.outputs.osx-arm64-sha-noruntime-noexternals}}')
releaseNote = releaseNote.replace(/<LINUX_X64_SHA_NORUNTIME_NOEXTERNALS>/g, '${{needs.build.outputs.linux-x64-sha-noruntime-noexternals}}')
releaseNote = releaseNote.replace(/<LINUX_ARM_SHA_NORUNTIME_NOEXTERNALS>/g, '${{needs.build.outputs.linux-arm-sha-noruntime-noexternals}}')
releaseNote = releaseNote.replace(/<LINUX_ARM64_SHA_NORUNTIME_NOEXTERNALS>/g, '${{needs.build.outputs.linux-arm64-sha-noruntime-noexternals}}')
console.log(releaseNote)
core.setOutput('version', runnerVersion);
core.setOutput('note', releaseNote);
- name: Validate Packages HASH
working-directory: _package
run: |
ls -l
echo "${{needs.build.outputs.win-x64-sha}} actions-runner-win-x64-${{ steps.releaseNote.outputs.version }}.zip" | shasum -a 256 -c
echo "${{needs.build.outputs.osx-x64-sha}} actions-runner-osx-x64-${{ steps.releaseNote.outputs.version }}.tar.gz" | shasum -a 256 -c
echo "${{needs.build.outputs.osx-arm64-sha}} actions-runner-osx-arm64-${{ steps.releaseNote.outputs.version }}.tar.gz" | shasum -a 256 -c
echo "${{needs.build.outputs.linux-x64-sha}} actions-runner-linux-x64-${{ steps.releaseNote.outputs.version }}.tar.gz" | shasum -a 256 -c
echo "${{needs.build.outputs.linux-arm-sha}} actions-runner-linux-arm-${{ steps.releaseNote.outputs.version }}.tar.gz" | shasum -a 256 -c
echo "${{needs.build.outputs.linux-arm64-sha}} actions-runner-linux-arm64-${{ steps.releaseNote.outputs.version }}.tar.gz" | shasum -a 256 -c
# Create GitHub release
- uses: actions/create-release@master
id: createRelease
@@ -141,16 +288,15 @@ jobs:
release_name: "v${{ steps.releaseNote.outputs.version }}"
body: |
${{ steps.releaseNote.outputs.note }}
prerelease: true
# Upload release assets
# Upload release assets (full runner packages)
- name: Upload Release Asset (win-x64)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/actions-runner-win-x64-${{ steps.releaseNote.outputs.version }}.zip
asset_path: ${{ github.workspace }}/_package/actions-runner-win-x64-${{ steps.releaseNote.outputs.version }}.zip
asset_name: actions-runner-win-x64-${{ steps.releaseNote.outputs.version }}.zip
asset_content_type: application/octet-stream
@@ -160,7 +306,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/actions-runner-linux-x64-${{ steps.releaseNote.outputs.version }}.tar.gz
asset_path: ${{ github.workspace }}/_package/actions-runner-linux-x64-${{ steps.releaseNote.outputs.version }}.tar.gz
asset_name: actions-runner-linux-x64-${{ steps.releaseNote.outputs.version }}.tar.gz
asset_content_type: application/octet-stream
@@ -170,17 +316,27 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/actions-runner-osx-x64-${{ steps.releaseNote.outputs.version }}.tar.gz
asset_path: ${{ github.workspace }}/_package/actions-runner-osx-x64-${{ steps.releaseNote.outputs.version }}.tar.gz
asset_name: actions-runner-osx-x64-${{ steps.releaseNote.outputs.version }}.tar.gz
asset_content_type: application/octet-stream
- name: Upload Release Asset (osx-arm64)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package/actions-runner-osx-arm64-${{ steps.releaseNote.outputs.version }}.tar.gz
asset_name: actions-runner-osx-arm64-${{ steps.releaseNote.outputs.version }}.tar.gz
asset_content_type: application/octet-stream
- name: Upload Release Asset (linux-arm)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/actions-runner-linux-arm-${{ steps.releaseNote.outputs.version }}.tar.gz
asset_path: ${{ github.workspace }}/_package/actions-runner-linux-arm-${{ steps.releaseNote.outputs.version }}.tar.gz
asset_name: actions-runner-linux-arm-${{ steps.releaseNote.outputs.version }}.tar.gz
asset_content_type: application/octet-stream
@@ -190,6 +346,250 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/actions-runner-linux-arm64-${{ steps.releaseNote.outputs.version }}.tar.gz
asset_path: ${{ github.workspace }}/_package/actions-runner-linux-arm64-${{ steps.releaseNote.outputs.version }}.tar.gz
asset_name: actions-runner-linux-arm64-${{ steps.releaseNote.outputs.version }}.tar.gz
asset_content_type: application/octet-stream
# Upload release assets (trim externals)
- name: Upload Release Asset (win-x64-noexternals)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_externals/actions-runner-win-x64-${{ steps.releaseNote.outputs.version }}-noexternals.zip
asset_name: actions-runner-win-x64-${{ steps.releaseNote.outputs.version }}-noexternals.zip
asset_content_type: application/octet-stream
- name: Upload Release Asset (linux-x64-noexternals)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_externals/actions-runner-linux-x64-${{ steps.releaseNote.outputs.version }}-noexternals.tar.gz
asset_name: actions-runner-linux-x64-${{ steps.releaseNote.outputs.version }}-noexternals.tar.gz
asset_content_type: application/octet-stream
- name: Upload Release Asset (osx-x64-noexternals)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_externals/actions-runner-osx-x64-${{ steps.releaseNote.outputs.version }}-noexternals.tar.gz
asset_name: actions-runner-osx-x64-${{ steps.releaseNote.outputs.version }}-noexternals.tar.gz
asset_content_type: application/octet-stream
- name: Upload Release Asset (osx-arm64-noexternals)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_externals/actions-runner-osx-arm64-${{ steps.releaseNote.outputs.version }}-noexternals.tar.gz
asset_name: actions-runner-osx-arm64-${{ steps.releaseNote.outputs.version }}-noexternals.tar.gz
asset_content_type: application/octet-stream
- name: Upload Release Asset (linux-arm-noexternals)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_externals/actions-runner-linux-arm-${{ steps.releaseNote.outputs.version }}-noexternals.tar.gz
asset_name: actions-runner-linux-arm-${{ steps.releaseNote.outputs.version }}-noexternals.tar.gz
asset_content_type: application/octet-stream
- name: Upload Release Asset (linux-arm64-noexternals)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_externals/actions-runner-linux-arm64-${{ steps.releaseNote.outputs.version }}-noexternals.tar.gz
asset_name: actions-runner-linux-arm64-${{ steps.releaseNote.outputs.version }}-noexternals.tar.gz
asset_content_type: application/octet-stream
# Upload release assets (trim runtime)
- name: Upload Release Asset (win-x64-noruntime)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_runtime/actions-runner-win-x64-${{ steps.releaseNote.outputs.version }}-noruntime.zip
asset_name: actions-runner-win-x64-${{ steps.releaseNote.outputs.version }}-noruntime.zip
asset_content_type: application/octet-stream
- name: Upload Release Asset (linux-x64-noruntime)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_runtime/actions-runner-linux-x64-${{ steps.releaseNote.outputs.version }}-noruntime.tar.gz
asset_name: actions-runner-linux-x64-${{ steps.releaseNote.outputs.version }}-noruntime.tar.gz
asset_content_type: application/octet-stream
- name: Upload Release Asset (osx-x64-noruntime)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_runtime/actions-runner-osx-x64-${{ steps.releaseNote.outputs.version }}-noruntime.tar.gz
asset_name: actions-runner-osx-x64-${{ steps.releaseNote.outputs.version }}-noruntime.tar.gz
asset_content_type: application/octet-stream
- name: Upload Release Asset (osx-arm64-noruntime)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_runtime/actions-runner-osx-arm64-${{ steps.releaseNote.outputs.version }}-noruntime.tar.gz
asset_name: actions-runner-osx-arm64-${{ steps.releaseNote.outputs.version }}-noruntime.tar.gz
asset_content_type: application/octet-stream
- name: Upload Release Asset (linux-arm-noruntime)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_runtime/actions-runner-linux-arm-${{ steps.releaseNote.outputs.version }}-noruntime.tar.gz
asset_name: actions-runner-linux-arm-${{ steps.releaseNote.outputs.version }}-noruntime.tar.gz
asset_content_type: application/octet-stream
- name: Upload Release Asset (linux-arm64-noruntime)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_runtime/actions-runner-linux-arm64-${{ steps.releaseNote.outputs.version }}-noruntime.tar.gz
asset_name: actions-runner-linux-arm64-${{ steps.releaseNote.outputs.version }}-noruntime.tar.gz
asset_content_type: application/octet-stream
# Upload release assets (trim runtime and externals)
- name: Upload Release Asset (win-x64-noruntime-noexternals)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_runtime_externals/actions-runner-win-x64-${{ steps.releaseNote.outputs.version }}-noruntime-noexternals.zip
asset_name: actions-runner-win-x64-${{ steps.releaseNote.outputs.version }}-noruntime-noexternals.zip
asset_content_type: application/octet-stream
- name: Upload Release Asset (linux-x64-noruntime-noexternals)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_runtime_externals/actions-runner-linux-x64-${{ steps.releaseNote.outputs.version }}-noruntime-noexternals.tar.gz
asset_name: actions-runner-linux-x64-${{ steps.releaseNote.outputs.version }}-noruntime-noexternals.tar.gz
asset_content_type: application/octet-stream
- name: Upload Release Asset (osx-x64-noruntime-noexternals)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_runtime_externals/actions-runner-osx-x64-${{ steps.releaseNote.outputs.version }}-noruntime-noexternals.tar.gz
asset_name: actions-runner-osx-x64-${{ steps.releaseNote.outputs.version }}-noruntime-noexternals.tar.gz
asset_content_type: application/octet-stream
- name: Upload Release Asset (osx-arm64-noruntime-noexternals)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_runtime_externals/actions-runner-osx-arm64-${{ steps.releaseNote.outputs.version }}-noruntime-noexternals.tar.gz
asset_name: actions-runner-osx-arm64-${{ steps.releaseNote.outputs.version }}-noruntime-noexternals.tar.gz
asset_content_type: application/octet-stream
- name: Upload Release Asset (linux-arm-noruntime-noexternals)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_runtime_externals/actions-runner-linux-arm-${{ steps.releaseNote.outputs.version }}-noruntime-noexternals.tar.gz
asset_name: actions-runner-linux-arm-${{ steps.releaseNote.outputs.version }}-noruntime-noexternals.tar.gz
asset_content_type: application/octet-stream
- name: Upload Release Asset (linux-arm64-noruntime-noexternals)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/_package_trims/trim_runtime_externals/actions-runner-linux-arm64-${{ steps.releaseNote.outputs.version }}-noruntime-noexternals.tar.gz
asset_name: actions-runner-linux-arm64-${{ steps.releaseNote.outputs.version }}-noruntime-noexternals.tar.gz
asset_content_type: application/octet-stream
# Upload release assets (trimmedpackages.json)
- name: Upload Release Asset (win-x64-trimmedpackages.json)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/win-x64-trimmedpackages.json
asset_name: actions-runner-win-x64-${{ steps.releaseNote.outputs.version }}-trimmedpackages.json
asset_content_type: application/octet-stream
- name: Upload Release Asset (linux-x64-trimmedpackages.json)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/linux-x64-trimmedpackages.json
asset_name: actions-runner-linux-x64-${{ steps.releaseNote.outputs.version }}-trimmedpackages.json
asset_content_type: application/octet-stream
- name: Upload Release Asset (osx-x64-trimmedpackages.json)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/osx-x64-trimmedpackages.json
asset_name: actions-runner-osx-x64-${{ steps.releaseNote.outputs.version }}-trimmedpackages.json
asset_content_type: application/octet-stream
- name: Upload Release Asset (osx-arm64-trimmedpackages.json)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/osx-arm64-trimmedpackages.json
asset_name: actions-runner-osx-arm64-${{ steps.releaseNote.outputs.version }}-trimmedpackages.json
asset_content_type: application/octet-stream
- name: Upload Release Asset (linux-arm-trimmedpackages.json)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/linux-arm-trimmedpackages.json
asset_name: actions-runner-linux-arm-${{ steps.releaseNote.outputs.version }}-trimmedpackages.json
asset_content_type: application/octet-stream
- name: Upload Release Asset (linux-arm64-trimmedpackages.json)
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/linux-arm64-trimmedpackages.json
asset_name: actions-runner-linux-arm64-${{ steps.releaseNote.outputs.version }}-trimmedpackages.json
asset_content_type: application/octet-stream

9
.gitignore vendored
View File

@@ -8,21 +8,22 @@
**/*.xproj
**/*.xproj.user
**/.vs
**/.vscode
**/*.error
**/*.json.pretty
.idea/
.vscode
!.vscode/launch.json
!.vscode/tasks.json
# output
node_modules
_downloads
_layout
_layout_trims
_package
_package_trims
_dotnetsdk
TestResults
TestLogs
.DS_Store
**/*.DotSettings.user
#generated
src/Runner.Sdk/BuildConstants.cs

58
.vscode/launch.json vendored Normal file
View File

@@ -0,0 +1,58 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "Run [build]",
"type": "coreclr",
"request": "launch",
"preLaunchTask": "build runner layout",
"program": "${workspaceFolder}/_layout/bin/Runner.Listener",
"args": [
"run"
],
"cwd": "${workspaceFolder}/src",
"console": "integratedTerminal",
"requireExactSource": false
},
{
"name": "Run",
"type": "coreclr",
"request": "launch",
"program": "${workspaceFolder}/_layout/bin/Runner.Listener",
"args": [
"run"
],
"cwd": "${workspaceFolder}/src",
"console": "integratedTerminal",
"requireExactSource": false
},
{
"name": "Configure",
"type": "coreclr",
"request": "launch",
"preLaunchTask": "create runner layout",
"program": "${workspaceFolder}/_layout/bin/Runner.Listener",
"args": [
"configure"
],
"cwd": "${workspaceFolder}/src",
"console": "integratedTerminal",
"requireExactSource": false
},
{
"name": "Debug Worker",
"type": "coreclr",
"request": "attach",
"processName": "Runner.Worker",
"requireExactSource": false
},
{
"name": "Attach Debugger",
"type": "coreclr",
"request": "attach",
"processId": "${command:pickProcess}",
"requireExactSource": false
},
],
}

33
.vscode/tasks.json vendored Normal file
View File

@@ -0,0 +1,33 @@
{
"version": "2.0.0",
"tasks": [
{
"label": "create runner layout",
"detail": "Build and Copy all projects, scripts and external dependencies to _layout from src (run this the first time or after deleting _layout)",
"command": "./dev.sh",
"windows": {
"command": "dev.cmd"
},
"args": [
"layout"
],
"options": {
"cwd": "${workspaceFolder}/src"
},
},
{
"label": "build runner layout",
"detail": "Build and Copy all projects to _layout from src (run this on code change)",
"command": "./dev.sh",
"windows": {
"command": "dev.cmd"
},
"args": [
"build"
],
"options": {
"cwd": "${workspaceFolder}/src"
},
}
],
}

1
CODEOWNERS Normal file
View File

@@ -0,0 +1 @@
* @actions/actions-runtime

View File

@@ -6,7 +6,7 @@
[![Actions Status](https://github.com/actions/runner/workflows/Runner%20CI/badge.svg)](https://github.com/actions/runner/actions)
The runner is the application that runs a job from a GitHub Actions workflow. The runner can run on the [hosted machine pools](https://github.com/actions/virtual-environments) or run on [self-hosted environments](https://help.github.com/en/actions/automating-your-workflow-with-github-actions/about-self-hosted-runners).
The runner is the application that runs a job from a GitHub Actions workflow. It is used by GitHub Actions in the [hosted virtual environments](https://github.com/actions/virtual-environments), or you can [self-host the runner](https://help.github.com/en/actions/automating-your-workflow-with-github-actions/about-self-hosted-runners) in your own environment.
## Get Started

View File

@@ -1,4 +1,4 @@
# ADR 263: Self Hosted Runner Proxies
# ADR 263: Self-Hosted Runner Proxies
**Date**: 2019-11-13
@@ -6,13 +6,13 @@
## Context
- Proxy support is required for some enterprises and organizations to start using their own self hosted runners
- While there is not a standard convention, many applications support setting proxies via the environmental variables `http_proxy`, `https_proxy`, `no_proxy`, such as curl, wget, perl, python, docker, git, R, ect
- Proxy support is required for some enterprises and organizations to start using their own self-hosted runners
- While there is not a standard convention, many applications support setting proxies via the environment variables `http_proxy`, `https_proxy`, `no_proxy`, such as curl, wget, perl, python, docker, git, and R
- Some of these applications use `HTTPS_PROXY` versus `https_proxy`, but most understand or primarily support the lowercase variant
## Decision
We will update the Runner to use the conventional environment variables for proxies: `http_proxy`, `https_proxy` and `no_proxy` if they are set.
We will update the Runner to use the conventional environment variables for proxies: `http_proxy`, `https_proxy`, and `no_proxy` if they are set.
These are described in detail below:
- `https_proxy` a proxy URL for all https traffic. It may contain basic authentication credentials. For example:
- http://proxy.com
@@ -22,20 +22,20 @@ These are described in detail below:
- http://proxy.com
- http://127.0.0.1:8080
- http://user:password@proxy.com
- `no_proxy` a comma seperated list of hosts that should not use the proxy. An optional port may be specified
- `no_proxy` a comma-separated list of hosts that should not use the proxy. An optional port may be specified. For example:
- `google.com`
- `yahoo.com:443`
- `google.com,bing.com`
We won't use `http_proxy` for https traffic when `https_proxy` is not set; this behavior lines up with libcurl-based tools (curl, git) and wget.
Otherwise action authors and workflow users need to adjust to differences between the runner proxy convention, and tools used by their actions and scripts.
Otherwise, action authors and workflow users need to adjust to differences between the runner proxy convention, and tools used by their actions and scripts.
Example:
Customer set `http_proxy=http://127.0.0.1:8888` and configure the runner against `https://github.com/owner/repo`, with the `https_proxy` -> `http_proxy` fallback, the runner will connect to server without any problem. However, if user runs `git push` to `https://github.com/owner/repo`, `git` won't use the proxy since it require `https_proxy` to be set for any https traffic.
Customer sets `http_proxy=http://127.0.0.1:8888` and configures the runner against `https://github.com/owner/repo`, with the `https_proxy` -> `http_proxy` fallback, the runner will connect to the server without any problem. However, if a user runs `git push` to `https://github.com/owner/repo`, `git` won't use the proxy since it requires `https_proxy` to be set for any https traffic.
> `golang`, `node.js` and other dev tools from the linux community use `http_proxy` for both http and https traffic base on my research.
> `golang`, `node.js`, and other dev tools from the Linux community use `http_proxy` for both http and https traffic based on my research.
A majority of our users are using Linux where these variables are commonly required to be set by various programs. By reading these values, we simplify the process for self hosted runners to set up proxy, and expose it in a way users are already familiar with.
A majority of our users are using Linux where these variables are commonly required to be set by various programs. By reading these values, we simplify the process for self-hosted runners to set up a proxy and expose it in a way users are already familiar with.
A password provided for a proxy will be masked in the logs.
@@ -43,19 +43,19 @@ We will support the lowercase and uppercase variants, with lowercase taking prio
### No Proxy Format
While exact implementations are different per application on handle `no_proxy` env, most applications accept a comma separated list of hosts. Some accept wildcard characters (*). We are going to do exact case-insentive matches, and not support wildcards at this time.
While exact implementations of `no_proxy` handling differ per application, most applications accept a comma-separated list of hosts. Some accept wildcard characters (`*`). We are going to do exact case-insensitive matches, and not support wildcards at this time.
For example:
- example.com will match example.com, foo.example.com, foo.bar.example.com
- foo.example.com will match bar.foo.example.com and foo.example.com
- `example.com` will match `example.com`, `foo.example.com`, and `foo.bar.example.com`
- `foo.example.com` will match `bar.foo.example.com` and `foo.example.com`
We will not support IP addresses for `no_proxy`, only hostnames.
## Consequences
1. Enterprises and organizations needing proxy support will be able to embrace self hosted runners
2. Users will need to set these environmental variables before configuring the runner in order to use a proxy when configuring
3. The runner will read from the environmental variables during config and runtime and use the provided proxy if it exists
4. Users may need to pass these environmental variables into other applications if they do not natively take these variables
5. Action authors may need to update their workflows to react to the these environment variables
6. We will document the way of setting environmental variables for runners using the environmental variables and how the runner uses them
7. Like all other secrets, users will be able to relatively easily figure out proxy password if they can modify a workflow file running on a self hosted machine
1. Enterprises and organizations needing proxy support will be able to embrace self-hosted runners
2. Users will need to set these environment variables before configuring the runner in order to use a proxy when configuring
3. The runner will read from the environment variables during config and runtime and use the provided proxy if it exists
4. Users may need to pass these environment variables into other applications if they do not natively take these variables
5. Action authors may need to update their workflows to react to these environment variables
6. We will document the way of setting environment variables for runners using the environment variables and how the runner uses them
7. Like all other secrets, users will be able to relatively easily figure out the proxy password if they can modify a workflow file running on a self-hosted machine
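As a hedged illustration of consequence 4, a workflow step can mirror the lowercase proxy variables into the uppercase variants that some tools expect; `./build.sh` is a placeholder for any such tool:

```yaml
# Sketch only: pass the conventional proxy variables through explicitly.
- name: Build behind a proxy
  shell: bash
  run: |
    export HTTPS_PROXY="${https_proxy}" HTTP_PROXY="${http_proxy}" NO_PROXY="${no_proxy}"
    ./build.sh
```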

View File

@@ -10,7 +10,7 @@ Compilation failures during a CI build should surface good error messages.
For example, the actual compile errors from the typescript compiler should bubble as issues in the UI. And not simply "tsc exited with exit code 1".
VSCode has an extensible model for solving this type of problem. VSCode allows users to configure which problems matchers to use, when scanning output. For example, a user can apply the `tsc` problem matcher to receive a rich error output experience in VSCode, when compiling their typescript project.
VSCode has an extensible model for solving this type of problem. VSCode allows users to configure which [problem matchers](https://code.visualstudio.com/docs/editor/tasks#_defining-a-problem-matcher) to use when scanning output. For example, a user can apply the `tsc` problem matcher to receive a rich error output experience in VSCode when compiling their typescript project.
The problem-matcher concept fits well with "setup" actions. For example, the `setup-nodejs` action will download node.js, add it to the PATH, and register the `tsc` problem matcher. For the duration of the job, the `tsc` problem matcher will be applied against the output.
@@ -18,23 +18,25 @@ The problem-matcher concept fits well with "setup" actions. For example, the `se
### Registration
#### Using `##` command
#### Using `::` command
`##[add-matcher]path-to-problem-matcher-config.json`
`::add-matcher::path-to-problem-matcher-config.json`
Using a `##` command allows for flexibility:
Using a `::` command allows for flexibility:
- Ad hoc scripts can register problem matchers
- Allows problem matchers to be conditionally registered
Note, if a matcher with the same name is registered a second time, it will clobber the first instance.
#### Unregister using `##` command
Note, at some point the syntax changed from `##` to `::`.
#### Unregister using `::` command
A way out for rare cases where scoping is a problem.
`##[remove-matcher]owner`
`::remove-matcher::owner`
For the this to be usable, the `owner` needs to be discoverable. Therefore, debug print the owner on registration.
For this to be usable, the `owner` needs to be discoverable. Therefore, debug print the owner on registration.
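To make the registration flow concrete, here is a hedged sketch of a step that registers a matcher and later removes it by owner; the config path and the `tsc` owner are illustrative and would normally come from the matcher JSON itself:

```yaml
# Illustrative only: the matcher file and its 'tsc' owner are assumptions.
- name: Register TypeScript problem matcher
  run: echo "::add-matcher::.github/tsc-problem-matcher.json"
# Removal by owner, using the syntax proposed in this ADR.
- name: Remove TypeScript problem matcher
  run: echo "::remove-matcher::tsc"
```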
### Single line matcher
@@ -104,7 +106,7 @@ message: ; expected
fromPath: C:\myrepo\myproject\ConsoleApp1\ClassLibrary1\ClassLibrary1.csproj
```
Additionally the line will appear red in the web UI (prefix with `##[error]`).
Additionally the line will appear red in the web UI (prefix with `::error`).
Note, an error does not imply task failure. Exit codes communicate failure.
@@ -184,7 +186,7 @@ Solving this problem means:
- Use the `github.workspace` (where the repo is cloned on disk)
- Match against a repository to determine the relative path within the repo
This is a place where we diverge from VSCode. VSCode task configuration are specific to the local workspace (workspace root is known or can be specified). We're solving a more generic problem, so we need more information - specifically the `fromPath` property - in order to accurately root the path.
This is a place where we diverge from VSCode. VSCode task configurations are specific to the local workspace (workspace root is known or can be specified). We're solving a more generic problem, so we need more information - specifically the `fromPath` property - in order to accurately root the path.
In order to avoid creating inaccurate hyperlinks on the error issues, the agent will verify the file exists and is in the main repository. Otherwise omit the file property from the error issue and debug trace what happened.
@@ -203,7 +205,7 @@ Problem matchers are unable to interpret severity strings other than `warning` a
However some tools indicate error/warning in different ways. For example `flake8` uses codes like `E100`, `W200`, and `F300` (error, warning, fatal, respectively).
Therefore, allow a property `severity`, sibling to `owner`, which identifies the default severity for the problem matcher. This allows two problem matchers are registered - one for warnings and one for errors.
Therefore, allow a property `severity`, sibling to `owner`, which identifies the default severity for the problem matcher. This allows two problem matchers to be registered - one for warnings and one for errors.
For example, given the following `flake8` output:

View File

@@ -8,7 +8,7 @@
run-actions run scripts using a platform specific shell:
`bash -eo pipefail` on non-windows, and `cmd.exe /c /d /s` on windows
The `shell` option overwrites this to allow different flags or completely different shells/interpreters
The `shell` option overrides this to allow different flags or completely different shells/interpreters
A small example is:
```yml
@@ -84,7 +84,7 @@ powershell/pwsh
- Users can always opt out by not using the builtins, and providing a shell option like: `pwsh -File {0}`, or `powershell -Command "& '{0}'"`, depending on need
cmd
- There doesnt seem to be a way to fully opt in to fail-fast behavior other than writing your script to check each error code and respond accordingly, so we cant actually provide that behavior by default, it will be completely up to the user to write this behavior into their script
- There doesn't seem to be a way to fully opt in to fail-fast behavior other than writing your script to check each error code and respond accordingly, so we can't actually provide that behavior by default, it will be completely up to the user to write this behavior into their script
- cmd.exe will exit (return the error code to the runner) with the errorlevel of the last program it executed. This is internally consistent with the previous default behavior (sh, pwsh) and is the cmd.exe default, so we keep that behavior
## Consequences

View File

@@ -5,7 +5,7 @@
**Status**: Accepted
## Context
First party action `actions/cache` needs a input which is an explicit `key` used for restoring and saving the cache. For packages caching, the most comment `key` might be the hash result of contents from all `package-lock.json` under `node_modules` folder.
First party action `actions/cache` needs an input which is an explicit `key` used for restoring and saving the cache. For package caching, the most common `key` might be the hash result of contents from all `package-lock.json` under the `node_modules` folder.
There are several different ways to get the hash `key` input for the `actions/cache` action.
@@ -38,7 +38,7 @@ There are serval different ways to get the hash `key` input for `actions/cache`
`hashFiles()` will only support hashing files under `$GITHUB_WORKSPACE` since the expression is evaluated on the runner; if the customer uses a job container or a container action, the runner won't have access to the file system inside the container.
`hashFiles()` will only take 1 parameter:
- `hashFiles('**/package-lock.json')` // Search files under $GITHUB_WORKSPACE and calculate a hash for them
- `hashFiles('**/package-lock.json')` // Search files under `$GITHUB_WORKSPACE` and calculate a hash for them
**Question: Do we need to support more than one match pattern?**
Ex: `hashFiles('**/package-lock.json', '!toolkit/core/package-lock.json', '!toolkit/io/package-lock.json')`
@@ -52,7 +52,7 @@ This will help customer has better experience with the `actions/cache` action's
key: ${{hashFiles('**/package-lock.json')}}-${{github.ref}}-${{runner.os}}
```
For search pattern, we will use basic globbing (`*` `?` and `[]`) and globstar (`**`).
For search pattern, we will use basic globbing (`*`, `?`, and `[]`) and globstar (`**`).
Additional pattern details:
- Root relative paths with `github.workspace` (the main repo)
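For context, a hedged sketch of how such a key is typically consumed by the `actions/cache` action (the cached `path` below is illustrative):

```yaml
- uses: actions/cache@v1
  with:
    path: ~/.npm   # illustrative cache location
    key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
```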

View File

@@ -15,7 +15,7 @@ This gives us good coverage across the board for secrets and secrets with a pref
However, we don't have great coverage for cases where the secret has a string appended to it before it is base64 encoded (i.e. `base64($pass\n)`).
Most notably we've seen this as a result of user error where a user accidentially appends a newline or space character before encoding their secret in base64.
Most notably we've seen this as a result of user error where a user accidentally appends a newline or space character before encoding their secret in base64.
## Decision
@@ -45,4 +45,4 @@ This will result in us only revealing length or bit information when a prefix or
- In the case where a secret has a prefix or suffix added before base64 encoding, we may now reveal up to 20 bits of information and the length of the original string modulo 3, rather than the original 16 bits and no length information
- Secrets with a suffix appended before encoding will now be masked across the board. Previously it was only masked if it was a multiple of 3 characters
- Performance will suffer in a neglible way
- Performance will suffer in a negligible way
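Independent of the automatic coverage discussed above, workflow authors can always mask a derived value explicitly with the existing `add-mask` command; a minimal sketch (the secret name is illustrative):

```yaml
# Sketch: explicitly mask a value derived from a secret, e.g. its base64 encoding.
- name: Mask derived secret
  shell: bash
  run: |
    derived="$(printf '%s\n' "${{ secrets.MY_PASSWORD }}" | base64)"
    echo "::add-mask::$derived"
```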

View File

@@ -24,7 +24,7 @@ The runner will look for a file `.setup_info` under the runner's root directory,
}
]
```
The runner will use `##[group]` and `##[endgroup]` to fold all detail info into an expandable group.
The runner will use `::group` and `::endgroup` to fold all detail info into an expandable group.
Both [virtual-environments](https://github.com/actions/virtual-environments) and self-hosted runners can use this mechanism to add extra logging info to the `Set up job` step's log.
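For illustration, the same folding commands can be emitted from any script; a minimal sketch of a step that writes a collapsed group to the log:

```yaml
# Minimal sketch of the log-folding commands referenced above.
- name: Print machine details
  shell: bash
  run: |
    echo "::group::Machine details"
    uname -a
    echo "::endgroup::"
```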

View File

@@ -6,9 +6,9 @@
## Context
In addition to action's regular execution, action author may wants their action has a chance to participate in:
- Job initialize
My Action will collect machine resource usage (CPU/RAM/Disk) during a workflow job execution, we need to start perf recorder at the begin of the job.
In addition to an action's regular execution, an action author may want their action to have a chance to participate in:
- Job initialization
My Action will collect machine resource usage (CPU/RAM/Disk) during a workflow job execution, we need to start perf recorder at the beginning of the job.
- Job cleanup
My Action will dirty local workspace or machine environment during execution, we need to cleanup these changes at the end of the job.
Ex: `actions/checkout@v2` will write `github.token` into local `.git/config` during execution, it has post job cleanup defined to undo the changes.
@@ -46,12 +46,12 @@ Container Action Example:
post-if: 'success()' // Optional
```
Both `pre` and `post` will has default `pre-if/post-if` sets to `always()`.
Both `pre` and `post` will have default `pre-if/post-if` set to `always()`.
Setting `pre` to `always()` will make sure that, regardless of how the `main` step's condition evaluates at runtime, the `pre` step has already run.
`pre` executes in order of how the steps are defined.
`pre` will always be added to job steps list during job setup.
> Action referenced from local repository (`./my-action`) won't get `pre` setup correctly since the repository haven't checkout during job initialize.
> We can't use GitHub api to download the repository since there is a about 3 mins delay between `git push` and the new commit available to download using GitHub api.
> An action referenced from a local repository (`./my-action`) won't get `pre` set up correctly since the repository hasn't been checked out during job initialization.
> We can't use the GitHub API to download the repository since there is about a 3 minute delay between `git push` and the new commit being available to download using the GitHub API.
`post` will be pushed onto a `poststeps` stack lazily when the action's `pre` or `main` execution has passed its `if` condition check and is about to run. You can't have an action that only contains a `post`; we will pop and run each `post` after all `pre` and `main` steps have finished.
> Currently `post` works for both repository action (`org/repo@v1`) and local action (`./my-action`)
@@ -60,7 +60,7 @@ Valid action:
- only has `main`
- has `pre` and `main`
- has `main` and `post`
- has `pre`, `main` and `post`
- has `pre`, `main`, and `post`
Invalid action:
- only has `pre`
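A hedged sketch of a JavaScript action manifest wiring up all three entry points (the action and its file names are illustrative):

```yaml
# Illustrative action.yml for a hypothetical resource-recorder action.
name: 'resource-recorder'
description: 'Records machine resource usage for the duration of the job'
runs:
  using: 'node12'
  pre: 'start-recorder.js'
  pre-if: 'always()'
  main: 'report.js'
  post: 'stop-recorder.js'
  post-if: 'success()'
```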

View File

@@ -13,13 +13,13 @@ This is another version of [ADR275](https://github.com/actions/runner/pull/275)
## Decision
This ADR proposes that we add a `--labels` option to `config`, which could be used to add custom additional labels to the configured runner.
This ADR proposes that we add a `--labels` option to the `config`, which could be used to add custom additional labels to the configured runner.
For example, to add a single extra label the operator could run:
For example, to add a single additional label the operator could run:
```bash
./config.sh --labels mylabel
```
> Note: the current runner command line parsing and envvar override algorithm only supports a single argument (key).
> Note: the current runner command line parsing and envvar override algorithm only support a single argument (key).
This would add the label `mylabel` to the runner, and enable users to select the runner in their workflow using this label:
```yaml
@@ -39,17 +39,17 @@ runs-on: [self-hosted, mylabel, anotherlabel]
It would not be possible to remove labels from an existing runner using `config.sh`, instead labels would have to be removed using the GitHub UI.
The labels argument will split on commas, trim and discard empty strings. That effectively means don't use commans in unattended config label names. Alternatively we could choose to escape commans but it's a nice to have.
The labels argument will split on commas, trim and discard empty strings. That effectively means don't use commas in unattended config label names. Alternatively, we could choose to escape commas but it's a nice to have.
## Replace
If an existing runner exists and the option to replace is chosen (interactively of via unattend as in this scenario), then the labels will be replaced / overwritten (not merged).
If an existing runner exists and the option to replace is chosen (interactively or via unattended as in this scenario), then the labels will be replaced/overwritten (not merged).
## Overriding built-in labels
Note that it is possible to register "built-in" hosted labels like `ubuntu-latest` and is not considered an error. This is an effective way for the org / runner admin to dictate by policy through registration that this set of runners will be used without having to edit all the workflow files now and in the future.
Note that it is possible to register "built-in" hosted labels like `ubuntu-latest` and is not considered an error. This is an effective way for the org/runner admin to dictate by policy through registration that this set of runners will be used without having to edit all the workflow files now and in the future.
We will also not make other restrictions such as limiting explicitly adding os / arch labels and validating. We will assume that explicit labels were added for a reason and not restricting offers the most flexibility and future proofing / compat.
We will also not make other restrictions such as limiting explicitly adding os/arch labels and validating. We will assume that explicit labels were added for a reason and not restricting offers the most flexibility and future-proofing / compatibility.
## Consequences

View File

@@ -0,0 +1,378 @@
# ADR 0549: Composite Run Steps
**Date**: 2020-06-17
**Status**: Accepted
## Context
Customers want to be able to compose actions from actions (ex: https://github.com/actions/runner/issues/438)
An important step towards meeting this goal is to build functionality for actions where users can simply execute any number of steps.
### Guiding Principles
We don't want the workflow author to need to know the internal workings of the composite action (for example, `default.shell` and `default.workingDir` should not be inherited from the workflow file to the action file). When deciding how to design certain parts of composite run steps, we want to treat it as one logical step for the consumer.
A composite action is treated as **one** individual job step (this is known as encapsulation).
## Decision
**In this ADR, we only support running multiple run steps in an Action.** In doing so, we build in support for mapping and flowing the inputs, outputs, and env variables (ex: All nested steps should have access to their parents' input variables and nested steps can overwrite the input variables).
### Composite Run Steps Features
This feature supports at the top action level:
- name
- description
- inputs
- runs
- outputs
This feature supports at the run step level:
- name
- id
- run
- env
- shell
- working-directory
This feature **does not support** at the run step level:
- timeout-minutes
- secrets
- conditionals (needs, if, etc.)
- continue-on-error
### Steps
Example `workflow.yml`
```yaml
jobs:
build:
runs-on: self-hosted
steps:
- id: step1
uses: actions/setup-python@v1
- id: step2
uses: actions/setup-node@v2
- uses: actions/checkout@v2
- uses: user/composite@v1
- name: workflow step 1
run: echo hello world 3
- name: workflow step 2
run: echo hello world 4
```
Example `user/composite/action.yml`
```yaml
runs:
using: "composite"
steps:
- run: pip install -r requirements.txt
shell: bash
- run: npm install
shell: bash
```
Example Output
```
[npm installation output]
[pip requirements output]
echo hello world 3
echo hello world 4
```
We add a token called "composite" which allows our Runner code to process composite actions. By invoking "using: composite", our Runner code then processes the "steps" attribute, converts this template code to a list of steps, and finally runs each run step sequentially. If any step fails and there are no `if` conditions defined, the whole composite action job fails.
### Defaults
We will not support "defaults" in a composite action.
### Shell and Working-directory
For each run step in a composite action, the action author can set the `shell` and `working-directory` attributes for that step. The `shell` attribute is **required** for each run step because the action author does not know which operating system the workflow author will run on, so we prevent undefined behavior by making sure that each run step has an explicit shell **set by the action author.** On the other hand, `working-directory` is optional. Moreover, the composite action author can map values from `inputs` into the `shell` and `working-directory` attributes at the step level.
For example,
`action.yml`
```yaml
inputs:
  shell_1:
    description: 'Shell to use for the step'
    default: 'pwsh'
runs:
  using: "composite"
  steps:
    - run: echo 1
      shell: ${{ inputs.shell_1 }}
```
Note that the workflow file and action file are treated as separate entities. **So, the workflow `defaults` will never change the `shell` and `working-directory` values of the run steps in a composite action.** Note also that `defaults` in a workflow only apply to run steps, not to `uses` steps (steps that use an action).
### Running Local Scripts
Example 'workflow.yml':
```yaml
jobs:
build:
runs-on: self-hosted
steps:
- uses: user/composite@v1
```
Example `user/composite/action.yml`:
```yaml
runs:
using: "composite"
steps:
- run: chmod +x ${{ github.action_path }}/test/script2.sh
shell: bash
- run: chmod +x $GITHUB_ACTION_PATH/script.sh
shell: bash
- run: ${{ github.action_path }}/test/script2.sh
shell: bash
- run: $GITHUB_ACTION_PATH/script.sh
shell: bash
```
Where `user/composite` has the file structure:
```
.
+-- action.yml
+-- script.sh
+-- test
| +-- script2.sh
```
Users will be able to run scripts located in their action folder by prefixing the relative path and script name with `$GITHUB_ACTION_PATH` or `github.action_path`, which contains the path the composite action is downloaded to and where those files live. Note that you'll have to run `chmod` before executing each script if you did not check the script files into your GitHub repo with the executable bit set.
### Inputs
Example `workflow.yml`:
```yaml
steps:
- id: foo
uses: user/composite@v1
with:
your_name: "Octocat"
```
Example `user/composite/action.yml`:
```yaml
inputs:
your_name:
description: 'Your name'
default: 'Ethan'
runs:
using: "composite"
steps:
- run: echo hello ${{ inputs.your_name }}
shell: bash
```
Example Output:
```
hello Octocat
```
Each input variable in the composite action is only viewable in its own scope.
### Outputs
Example `workflow.yml`:
```yaml
...
steps:
- id: foo
uses: user/composite@v1
- run: echo random-number ${{ steps.foo.outputs.random-number }}
shell: bash
```
Example `user/composite/action.yml`:
```yaml
outputs:
random-number:
description: "Random number"
value: ${{ steps.random-number-generator.outputs.random-id }}
runs:
using: "composite"
steps:
- id: random-number-generator
run: echo "::set-output name=random-id::$(echo $RANDOM)"
shell: bash
```
Example Output:
```
::set-output name=random-id::43243
random-number 43243
```
Each of the output variables from the composite action is viewable from the workflow file that uses the composite action. In other words, every child action's outputs are only viewable by its parent, using dot notation (e.g. `steps.foo.outputs.random-number`).
Moreover, output ids are only accessible within the scope where they are defined. Note that in the example above, the `workflow.yml` file does not have access to the internal output id (i.e. `random-id`). The reason for this is that we don't want to require the workflow author to know the internal workings of the composite action.
### Context
Similar to the workflow file, the composite action has access to the [same context objects](https://help.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#contexts) (ex: `github`, `env`, `strategy`).
### Environment
In a composite action, you'll only be able to use the `set-env` workflow command to set environment variables, just like you can with other actions.
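For illustration, a run step inside a composite action could set an environment variable for later steps like this (a sketch; `set-env` was the workflow command at the time of this ADR and has since been replaced by the `GITHUB_ENV` file):
```bash
# Makes MY_VAR available to subsequent steps in the job.
echo "::set-env name=MY_VAR::some value"
```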
### Secrets
**We will not support "Secrets" in a composite action for now. This functionality will be focused on in a future ADR.**
We'll pass the secrets from the composite action's parent (e.g. the workflow file) down to the composite action. Secrets can then be accessed in the composite action with the secrets context. In the action's YAML, we'll automatically mask the secret.
### If-Condition
**`If` and `needs` conditions will not be supported in the composite run steps feature. They will be supported later on in a new feature.**
Old reasoning:
Example `workflow.yml`:
```yaml
steps:
- run: exit 1
- uses: user/composite@v1 # <--- this will run, as it's marked as always running
if: always()
```
Example `user/composite/action.yml`:
```yaml
runs:
using: "composite"
steps:
- run: echo "just succeeding"
shell: bash
- run: echo "I will run, as my current scope is succeeding"
shell: bash
if: success()
- run: exit 1
shell: bash
- run: echo "I will not run, as my current scope is now failing"
shell: bash
```
**We will not support "if-condition" in a composite action for now. This functionality will be focused on in a future ADR.**
See the paragraph below for a rudimentary approach (thank you to @cybojenix for the idea, example, and explanation for this approach):
The `if` statement in the parent (in the example above, this is the `workflow.yml`) shows whether or not we should run the composite action. So, our composite action will run since the `if` condition for running the composite action is `always()`.
**Note that the "if-condition" on the parent does not propagate to the rest of its children though.**
In the child action (in this example, this is the `action.yml`), it starts with a clean slate (in other words, no imposed if-conditions). Similar to the logic in the paragraph above, `echo "I will run, as my current scope is succeeding"` will run since the `if` condition checks whether the previous steps **within this composite action** have not failed. `run: echo "I will not run, as my current scope is now failing"` will not run since the previous step resulted in an error and, by default, the if expression is set to `success()` when no if-condition is set for a step.
What if a step has `cancelled()`? We do the opposite of our approach above if `cancelled()` is used for any of our composite run steps. We will cancel any step that has this condition if the workflow is cancelled at all.
### Timeout-minutes
Example `workflow.yml`:
```yaml
steps:
- id: bar
uses: user/test@v1
timeout-minutes: 50
```
Example `user/composite/action.yml`:
```yaml
runs:
using: "composite"
steps:
- id: foo1
run: echo test 1
timeout-minutes: 10
shell: bash
- id: foo2
run: echo test 2
shell: bash
- id: foo3
run: echo test 3
timeout-minutes: 10
shell: bash
```
**We will not support "timeout-minutes" in a composite action for now. This functionality will be focused on in a future ADR.**
A composite action in its entirety is treated like a job. You can set `timeout-minutes` both for the whole composite action and for its steps, as long as the sum of `timeout-minutes` across the composite action steps that have the attribute is less than or equal to the `timeout-minutes` of the composite action. There is no default `timeout-minutes` for a composite action step.
If the time taken for any of the steps in combination or individually exceeds the whole composite action `timeout-minutes` attribute, the whole job will fail (1). If an individual step exceeds its own `timeout-minutes` attribute but the total time that has been used including this step is below the overall composite action `timeout-minutes`, the individual step will fail but the rest of the steps will run based on their own `timeout-minutes` attribute (they will still abide by condition (1) though).
For reference, in the example above, if the composite step `foo1` takes 11 minutes to run, that step will fail, but the rest of the steps, `foo2` and `foo3`, will proceed as long as their total runtime together with the failed `foo1` step is less than the composite action's `timeout-minutes` (50 minutes). If the composite step `foo2` takes 51 minutes to run, it will cause the whole composite action job to fail.
The rationale behind this is that users can configure their steps with the `if` condition to conditionally set how steps rely on each other. Due to the additional capabilities that are offered with combining `timeout-minutes` and/or `if`, we wanted the `timeout-minutes` condition to be as dumb as possible and not affect other steps.
[Usage limits still apply](https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions?query=if%28%29#usage-limits)
### Continue-on-error
Example `workflow.yml`:
```yaml
steps:
- run: exit 1
- id: bar
uses: user/test@v1
continue-on-error: false
- id: foo
run: echo "Hello World" <------- This step will not run
```
Example `user/composite/action.yml`:
```yaml
runs:
using: "composite"
steps:
- run: exit 1
continue-on-error: true
shell: bash
- run: echo "Hello World 2" <----- This step will run
shell: bash
```
**We will not support "continue-on-error" in a composite action for now. This functionality will be focused on in a future ADR.**
If any of the steps in the composite action fail and `continue-on-error` is set to `false` for the whole composite action step in the workflow file, then the workflow steps below it will not run. On the flip side, if `continue-on-error` is set to `true` for the whole composite action step in the workflow file, the next job step will still run.
For the individual composite action steps, the same logic applies. In this example, `"Hello World 2"` will be output because the previous step has `continue-on-error` set to `true`, even though that previous step errored.
### Visualizing Composite Action in the GitHub Actions UI
We want all the composite action's steps to be condensed into the original composite action node.
Here is a visual representation of the [first example](#Steps)
```
| composite_action_node |
| echo hello world 1 |
| echo hello world 2 |
| echo hello world 3 |
| echo hello world 4 |
```
## Consequences
This ADR lays the framework for eventually supporting nested composite actions within composite actions. It allows users to run multiple run steps within a GitHub composite action, with support for inputs, outputs, environment, and context in those steps, and it leaves room to later add the `if`, `timeout-minutes`, and `continue-on-error` attributes for each composite action step.

**Date**: 2021-06-10
**Status**: Accepted
## Context
We released [composite run steps](https://github.com/actions/runner/pull/554) last year which started our journey of reusing steps across different workflow files. To continue that journey, we want to expand composite run steps into composite actions.
We want to support the `uses` steps from workflows in composite actions, including:
- Container actions
- Javascript actions
- Other Composite actions (up to a limit of course!)
- The pre and post steps these actions can generate
## Guiding Principles
- Composite Actions should function as a single step or action, no matter how many steps it is composed of or how many levels of recursion it has
- In the future we may add a configurable option to make this no longer the case
- A workflow author should not need to understand the inner workings of a composite action in order to use it
- Composite actions should leverage inputs to get the values they need; they will not have full access to the `context` objects. The `secrets` context will **not** be available to composite actions; users will need to pass these values in as inputs.
- Other Actions should **just work** inside a composite action, without any code changes
## Decisions
### Composite Recursion Limit
- We will start with supporting a recursion limit of `10` composite actions deep
- We are free to bump this limit in the future; the code will be written so that only a variable needs updating. If the graph evaluates beyond the recursion limit, the job will fail in the pre-job phase (the `Set up job` step).
- A composite action's interface is its inputs and outputs; nothing else is carried over when invoking recursively.
### Pre/Post Steps in nested Actions
- We do not plan on adding the ability to configure a customizable pre or post step for composite actions at this time. However, we will execute the pre and post steps of any actions referenced in a composite action.
- Composite actions will generate a single pre-step and post-step for the entire composite action, even if there are multiple pre-steps and post-steps in the referenced actions.
- These steps will execute following the same ordering rules we have today: the first action to run has its pre step run first and its post step run last.
- For example, if you had a composite action with two pre steps and two post steps:
```
- uses: action1
- uses: composite1
- uses: action2
```
The order of execution would be:
```
- prestep-action1
- prestep-composite1
- prestep-composite1-first-action-referenced
- prestep-composite1-second-action-referenced
- prestep-action2
- the job steps
- poststep-action2
- poststep-composite1
- poststep-composite1-second-action-referenced
- poststep-composite1-first-action-referenced
- poststep-action1
```
#### Set-state
- While the composite action has a single combined pre step and post step, the `set-state` command will not be shared.
- If the `set-state` command is used during a composite step, only the action that originally called `set-state` will have access to that state during its post run step (see the sketch below).
- This prevents multiple actions that set the same state from interfering with the execution of another action's post step.
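For illustration, a rough sketch of how this isolation plays out (assuming the `save-state` workflow command that backs this behavior, and a hypothetical state name `cleanup_dir`):
```bash
# In action1's main step: record state for action1's own post step.
echo "::save-state name=cleanup_dir::/tmp/action1-scratch"

# In action1's post step: the value is exposed only to action1, as STATE_<name>.
echo "Cleaning up $STATE_cleanup_dir"

# A post step of a different action that also saved a 'cleanup_dir' state sees only
# its own value, so the two actions cannot interfere with each other.
```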
### Resolve Action Endpoint changes
- The resolve actions endpoint will now validate policy to ensure that the given workflow run has access to download that action.
- Older GHES/GHAE customers with newer runners will be locked out of composite uses steps until they upgrade their instance.
### Local actions
- Local actions will expand the tree, perform policy checks, and download actions Just in Time when the step is running.
- Like current local actions, we will not support pre steps. If an action is running locally, by the time we know that, the time to run pre steps has already passed.
### If, continue-on-error, timeout-minutes - Not being considered at this time
- `if`, `continue-on-error`, `timeout-minutes` could be supported in composite run/uses steps. These values were not originally supported in our composite run steps implementation.
- Browsing the community forums and runner repo, there hasn't been a lot of noise asking for these features, so we will hold off on them.
- These values passed as input into the composite action will **not** be carried over as input into the individual steps the composite action runs.
### Defaults - Not being considered at this time
- In actions, we have the idea of [defaults](https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#defaultsrun), which allows you to specify a shell and working directory in one location, rather than on each step.
- However, `shell` is currently required in composite run steps
- In regular run steps, it is optional, and defaults to a different value based on the OS.
- We want to prioritize the right experience for the consumer, and make the action author continue to explicitly set these values. We can consider improving this experience in the future.
## Consequences
- Workflows are now more reusable across multiple workflow files
- Composite actions implement most of the existing workflow run steps, with room to expand these in the future
- Feature flags will control this rollout

# ADR 1438: Support Conditionals In Composite Actions
**Date**: 2021-10-13
**Status**: Accepted
## Context
We recently shipped composite actions, which allows you to reuse individual steps inside an action.
However, one of the [most requested features](https://github.com/actions/runner/issues/834) has been a way to support the `if` keyword.
### Goals
- We want to keep consistent with current behavior
- We want to support conditionals via the `if` keyword
- Our built-in functions like `success` should be expressible without calling them; for example, you can currently write `job.status == success` rather than `success()`.
### How does composite currently work?
Currently, we have limited conditional support in composite actions for `pre` and `post` steps.
These are based on the `job status`, and support functions like `always()`, `failure()`, `success()` and `cancelled()`.
However, generic or main steps do **not** support conditionals.
By default, in a regular workflow, a step runs on the `success()` condition, which looks at the **job status**, sees if it is successful, and runs.
By default, in a composite action, main steps run until a single step fails in that composite action, then the composite action is halted early. It does **not** care about the job status.
Pre and post steps in composite actions use the job status to determine if they should run.
### How do we go forward?
Well, if we think about what composite actions are currently doing when invoking main steps, they are checking if the current composite action is successful.
Let's formalize that concept into a "real" idea.
- We will add an `action_status` field to the github context to mimic the [job's context status](https://docs.github.com/en/actions/learn-github-actions/contexts#job-context).
- We have an existing precedent for this: `action_path`, which is only set on the github context for composite actions.
- In a composite action during a main step, the `success()` function will check whether `action_status == success`, rather than `job_status == success`. Failure will work the same way.
- Pre and post steps in composite actions will not change, they will continue to check the job status.
### Nested Scenario
For nested composite actions, we will follow the existing behavior: you only care about your current composite action, not any parents.
For example, let's imagine a scenario with a simple nested composite action:
```
- Job
- Regular Step
- Composite Action
- runs: exit 1
- if: always()
uses: A child composite action
- if: success()
runs: echo "this should print"
- runs: echo "this should also print"
- if: success()
runs: echo "this will not print as the current composite action has failed already"
```
The child composite action's steps should run in this example: the child composite action has not yet failed, so it should run all of its steps until one fails. This is consistent with how a composite action currently works in production when the main job fails but the composite action is invoked with `if: always()` or `if: failure()`.
### Other options explored
We could add a `current_step_status` to the job context rather than a `__status` on the steps context, however this comes with two major downsides:
- We would need to support the field for every type of step, because it's non-trivial to remove a field from the job context once it has been added (it's read-only)
- For all actions besides composite it would only ever be `success`
- It's weird to have a `current_step` value on the job context
- We also explored a `__status` on the steps context.
- The `__` is required to prevent us from colliding with a step with `id: status`
- This felt wrong because the naming was not smooth and did not fit current conventions.
## Consequences
- github context has a new field for the status of the current composite action.
- We support conditionals in composite actions
- We keep the existing behavior for all users, but allow them to expand that functionality.

# ADR: Notification Hooks for Runners
## Context
This ADR details the design changes for supporting custom, configurable hooks on various runner events. This has been a long-requested feature ([here](https://github.com/actions/runner/issues/1543), [here](https://github.com/actions/runner/issues/699) and [here](https://github.com/actions/runner/issues/1116)), giving users more insight into runner observability and the ability to run cleanup and teardown tasks.
This feature is mainly intended for self-hosted runner administrators.
**What we hope to solve with this feature**
1. A runner administrator is able to add custom scripts to clean up their runner environment at the start or end of a job
2. A runner administrator is able to add custom scripts to help set up their runner environment at the beginning of a job, for reasons like [caching](https://github.com/actions/runner/issues/1543#issuecomment-1050346279)
3. A runner administrator is able to grab custom telemetry of jobs running on their self hosted runner
**What we don't think this will solve**
- Policy features that require certain steps run at the beginning or end of all jobs
- This would be better solved in a central place in settings, rather than decentralized on each runner.
- The proposed `Notification Hooks for Runners` feature is limited to self-hosted runners; we don't believe policy features should be
- Reuse scenarios between jobs are covered by [composite actions](https://docs.github.com/en/actions/creating-actions/creating-a-composite-action) and [reusable workflows](https://docs.github.com/en/actions/using-workflows/reusing-workflows)
- Security applications, security should be handled on the policy side on the server, not decentralized on each runner
## Hooks
- We will expose 2 variables that users can set to enable hooks
- `ACTIONS_RUNNER_HOOK_JOB_STARTED`
- `ACTIONS_RUNNER_HOOK_JOB_COMPLETED`
You can set these variables to the **absolute** path of a `.sh` or `.ps1` file.
We will execute `pwsh` (fallback to `powershell`) or `bash` (fallback to `sh`) as appropriate.
- `.sh` files will execute with the args `-e {pathtofile}`
- `.ps1` files will execute with the args `-command \". '{pathtofile}'\"`
We will **not** set the [standard flags we typically set](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsshell) for `runs` commands. So, if you want to set `pipefail` on `bash` for example, you will need to do that in your script.
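For illustration, a minimal sketch of wiring up a job-started hook on a Linux runner (the script path and contents are hypothetical):
```bash
#!/bin/bash
# Saved as /opt/runner-hooks/job-started.sh (hypothetical path).
# The runner does not apply its usual shell flags, so opt in to strict mode yourself.
set -euo pipefail

echo "Job starting on $(hostname) for repository $GITHUB_REPOSITORY"
# ...prune caches, warm up tools, emit telemetry, etc.
```
The hook is then enabled by setting the variable in the runner's environment, for example in the `.env` file next to the runner installation:
```bash
ACTIONS_RUNNER_HOOK_JOB_STARTED=/opt/runner-hooks/job-started.sh
```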
### UI
We want to ensure the experience for users invoking workflows is good; if hooks take too long, your job may feel delayed or broken. So, much like `Set up job`, we will generate two new steps automatically in your job, one for each configured hook:
- `Set up runner`
- `Complete runner`
These steps will contain all of the output from invoking your hook, so you will have visibility into the runtime. We will also provide information on the path to the hook, and what shell we are invoking it as, much like we do for `run: ` steps.
### Contexts
When running your hooks, some context on your job may be helpful.
- The scripts will have access to the standard [default environment variables](https://docs.github.com/en/actions/learn-github-actions/environment-variables#default-environment-variables)
- Some of these variables are step-specific (like `GITHUB_ACTION`), in which case they will not be set
- You can pull the full webhook event payload from `GITHUB_EVENT_PATH` (see the sketch after this list)
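For example, a hook could read that payload like this (a sketch; assumes `jq` is installed on the runner host):
```bash
# Print the repository full name and the event that triggered the job.
event_name="${GITHUB_EVENT_NAME:-unknown}"
repo=$(jq -r '.repository.full_name // "unknown"' "$GITHUB_EVENT_PATH")
echo "Hook running for a '$event_name' event on $repo"
```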
### Commands
Should we expose [Commands](https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions) and [Environment Files](https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#environment-files)?
**Yes**. Imagine a scenario where a runner administrator is deprecating a runner pool and needs to [warn users](https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-a-warning-message) to swap to a different pool; we should support them in doing this (see the sketch after this list). However, there are some limitations:
- [save-state](https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#sending-values-to-the-pre-and-post-actions) will **not** be supported, these are not traditional steps with pre and post actions
- [set-output](https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#using-workflow-commands-to-access-toolkit-functions) will **not** be supported, there is no `id` as this is not a traditional step
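For illustration, such a deprecation warning emitted from a hook might look like this (the message text and label are hypothetical):
```bash
# Surfaces a warning annotation on the workflow run, visible to the workflow author.
echo "::warning::This runner pool is being deprecated; please move your workflows to runners labeled 'new-pool'."
```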
### Environment Files
We will also enable [Environment Files](https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#environment-files) to support setup scenarios for the runner environment.
While a self-hosted runner admin can [set env variables](https://docs.github.com/en/actions/hosting-your-own-runners/using-a-proxy-server-with-self-hosted-runners#using-a-env-file-to-set-the-proxy-configuration), those apply to all jobs. By enabling the ability to add a path and set an env, we give runner admins the ability to do this dynamically, based on the [workflow's environment variables](https://docs.github.com/en/actions/learn-github-actions/environment-variables#default-environment-variables), to empower setup scenarios.
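For example, a job-started hook could dynamically expose a host-installed tool to the rest of the job (paths are hypothetical):
```bash
# Make a host-installed toolchain visible to subsequent job steps.
echo "MY_TOOL_HOME=/opt/mytool" >> "$GITHUB_ENV"
echo "/opt/mytool/bin" >> "$GITHUB_PATH"
```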
### Exit codes
These are **synchronous** hooks, so they will block job execution while they are being run. Exit code 0 will indicate a successful run of the hook and we will proceed with the job, any other exit code will fail the job with an appropriate annotation.
- There will be no support for `continue-on-error`
## Key Decisions
- We will expose 2 variables that users can set to enable hooks
- `ACTIONS_RUNNER_HOOK_JOB_STARTED`
- `ACTIONS_RUNNER_HOOK_JOB_COMPLETED`
- Users can set these variables to the path of a `.sh` or `.ps1` file, which we will execute when Jobs are started or completed.
- Output from these will be added to a new step at the start/end of a job named `Set up runner` or `Complete runner`.
- These steps will only be generated on runs with these hooks
- These hooks `always()` execute if the env variable is set
- These files will execute as the Runner user, outside of any container specification on the job
- These are **synchronous** hooks
- Runner admins can execute a background process for async hooks if they want
- We will fail the job and halt execution on any exit code that is not 0. The runner admin is responsible for returning the correct exit code and ensuring resiliency.
- This includes ensuring the runner user has access to the file set in the env variable and that the file exists
- There will be no `continue-on-error` type option on launch
- There will be no `timeout` option on launch
## Consequences
- Runner admins have the ability to tie into the runner job execution to publish their own telemetry or perform their own cleanup or setup
- New steps will be added to the UI showcasing the output of these hooks

# ADR 0000: Container Hooks
**Date**: 2022-05-12
**Status**: Accepted
# Background
[Job Hooks](https://github.com/actions/runner/blob/main/docs/adrs/1751-runner-job-hooks.md) have given users the ability to customize how their self hosted runners run a job.
Users also want the ability to customize how they run containers during the scope of the job, rather than being locked into the docker implementation we have in the runner. They may want to use podman, kubernetes, or even change the docker commands we run.
We should give them that option, and publish examples of how they can create their own hooks.
# Guiding Principles
- **Extensibility** is the focus, we need to make sure we are flexible enough to cover current and future scenarios, even at the cost of making it harder to utilize these hooks
- Args should map **directly** to yaml values provided by the user.
- For example, the current runner overrides `HOME`; we can do that in the hook, but we shouldn't pass it to the hook as an env alongside the other envs the user has set, as that is not user input, it is how the runner invokes containers
## Interface
- You will set the variable `ACTIONS_RUNNER_CONTAINER_HOOK=/Users/foo/runner/hooks.js` which is the entrypoint to your hook handler.
- There is no partial opt-in; you must handle every hook
- We will pass a command and some args via `stdin`
- An exit code of 0 is a success, every other exit code is a failure
- We will support the same runner commands we support in [Job Hooks](https://github.com/actions/runner/blob/main/docs/adrs/1751-runner-job-hooks.md)
- On timeout, we will send a SIGINT to your process. If it fails to terminate within a reasonable amount of time, we will send a SIGKILL and eventually kill the process tree.
An example input looks like
```json
{
"command": "job_cleanup",
"responseFile": "/users/thboop/runner/_work/{guid}.json",
"args": {},
"state":
{
"id": "82e8219701fe096a35941d869cf8d71af1d943b5d3bdd718850fb87ac3042480"
}
}
```
`command` is the command we expect you to invoke
`responseFile` is the file you need to write your output to, if the command has output
`args` are the specific arguments the command needs
`state` is a JSON blob you can pass around to maintain your state; this is covered in more detail below.
### Writing responses to a file
All text written to stdout or stderr should appear in the job or step logs. With that in mind, we support a few ways to actually return data:
1. Wrapping the json in some unique tag and processing it like we do commands
2. Writing to a file
For option 1, users typically view logging as a safe action, so we worry about someone accidentally logging unsanitized information and causing unexpected or insecure behavior. We eventually plan to move off of stdout/stderr style commands in favor of a runner CLI.
Investing in this area doesn't make a lot of sense at this time.
While writing to a file to communicate isn't the most ideal pattern, it's an existing pattern in the runner and serves us well, so let's reuse it.
### Output
Your output must be correctly formatted json. An example output looks like:
```
{
"state": {},
"context"
{
"container" :
{
"id": "82e8219701fe096a35941d869cf8d71af1d943b5d3bdd718850fb87ac3042480"
"network": "github_network_53269bd575974817b43f4733536b200c"
}
"services": {
"redis": {
"id": "60972d9aa486605e66b0dad4abb638dc3d9116f566579e418166eedb8abb9105",
"ports": {
"8080": "8080"
},
"network": "github_network_53269bd575974817b43f4733536b200c"
}
}
"alpine: true,
}
```
`state` is a unique field any command can return. If it is not empty, we will store the state for you and pass it into all future commands. You can overwrite it by having the next hook invoked return a unique state.
Other fields are dependent upon the command being run.
### Versioning
We will not version these hooks at launch. If needed, we can always major version split these hooks in the future. We will ship in Beta to allow for breaking changes for a few months.
### The Job Context
The [job context](https://docs.github.com/en/actions/learn-github-actions/contexts#example-contents-of-the-job-context) currently has a variety of fields that correspond to containers. We should consider allowing hooks to populate new fields in the job context. That is out of scope for this original release however.
## Hooks
Hooks are to be implemented at a very high level, and map to actions the runner performs, rather than specific docker actions like `docker build` or `docker create`. By mapping to runner actions, we create a very extensible framework that is flexible enough to solve any user concerns in the future. By providing first party implementations, we give users easy starting points to customize specific hooks (like `docker build`) without having to write full blown solutions.
The other option would be to provide hooks that mirror every docker call we make, and expose more hooks to help support k8s users, with the expectation that users may have to no-op multiple hooks if they don't correspond to their use case.
Why we don't want to go that way:
- It feels clunky; users need to understand which hooks they need to implement and which they can ignore, which isn't a great UX
- It doesn't scale well; we don't want to build a solution where we may need to keep adding more hooks, since updating hooks is a painful experience for users, and mapping to runner actions avoids that
- It's overwhelming; it's easier to tell users to build 4 hooks and track data themselves, rather than 16 hooks where the runner needs certain information and then needs to provide that information back into each hook. If we expose `Container Create`, you need to return the container you created, and then we do `Container Run`, which uses that container. If we just give you an image and say "create and run this container", you don't need to store the container id in the runner, and it maps better to k8s scenarios where we don't really have container ids.
### Prepare_job hook
The `prepare_job` hook is called when a job is started. We pass in any job or service containers the job has. We expect that you:
- Prune anything from previous jobs if needed
- Create a network if needed
- Pull the job and service containers
- Start the job container
- Start the service containers
- Write to the response file some information we need
- Required: whether the container is Alpine; otherwise x64 is assumed
- Optional: any context fields you want to set on the job context, otherwise they will be unavailable for users to use
- Return 0 when the health checks have succeeded and the job/service containers are started
This hook will **always** be called if you have container hooks enabled, even if no service or job containers exist in the job. This allows you to fail the job or implement a default job container if you want to and no job container has been provided.
<details>
<summary>Example Input</summary>
<br>
```
{
"command": "prepare_job",
"responseFile": "/users/thboop/runner/_work/{guid}.json",
"state": {},
"args":
{
"jobContainer": {
"image": "node:14.16",
"workingDirectory": "/__w/thboop-test2/thboop-test2",
"createOptions": "--cpus 1",
"environmentVariables": {
"NODE_ENV": "development"
},
"userMountVolumes:[
{
"sourceVolumePath": "my_docker_volume",
"targetVolumePath": "/volume_mount",
"readOnly": false
},
],
"mountVolumes": [
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work",
"targetVolumePath": "/__w",
"readOnly": false
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/externals",
"targetVolumePath": "/__e",
"readOnly": true
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work/_temp",
"targetVolumePath": "/__w/_temp",
"readOnly": false
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work/_actions",
"targetVolumePath": "/__w/_actions",
"readOnly": false
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work/_tool",
"targetVolumePath": "/__w/_tool",
"readOnly": false
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work/_temp/_github_home",
"targetVolumePath": "/github/home",
"readOnly": false
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work/_temp/_github_workflow",
"targetVolumePath": "/github/workflow",
"readOnly": false
}
],
"registry": {
"username": "foo",
"password": "bar",
"serverUrl": "https://index.docker.io/v1"
},
"portMappings": [ "8080:80/tcp", "8080:80/udp" ]
},
"services": [
{
"contextName": "redis",
"image": "redis",
"createOptions": "--cpus 1",
"environmentVariables": {},
"mountVolumes": [],
"portMappings": [ "8080:80/tcp", "8080:80/udp" ]
"registry": {
"username": "foo",
"password": "bar",
"serverUrl": "https://index.docker.io/v1"
}
}
]
}
}
```
</details>
<details>
<summary>Field Descriptions</summary>
<br>
```
Arg Fields:
jobContainer: **Optional** An Object containing information about the specified job container
"image": **Required** A string containing the docker image
"workingDirectory": **Required** A string containing the absolute path of the working directory
"createOptions": **Optional** The optional create options specified in the [YAML](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container#example-running-a-job-within-a-container)
"environmentVariables": **Optional** A map of key value env's to set
"userMountVolumes: ** Optional** an array of user mount volumes set in the [YAML](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container#example-running-a-job-within-a-container)
"sourceVolumePath": **Required** The source path to the volume to be mounted into the docker container
"targetVolumePath": **Required** The target path to the volume to be mounted into the docker container
"readOnly": false **Required** whether or not the mount should be read only
"mountVolumes": **Required** an array of mounts to mount into the container, same fields as above
"sourceVolumePath": **Required** The source path to the volume to be mounted into the docker container
"targetVolumePath": **Required** The target path to the volume to be mounted into the docker container
"readOnly": false **Required** whether or not the mount should be read only
"registry" **Optional** docker registry credentials to use when using a private container registry
"username": **Optional** the username
"password": **Optional** the password
"serverUrl": **Optional** the registry url
"portMappings": **Optional** an array of source:target ports to map into the container
"services": an array of service containers to spin up
"contextName": **Required** the name of the service in the Job context
"image": **Required** A string containing the docker image
"createOptions": **Optional** The optional create options specified in the [YAML](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container#example-running-a-job-within-a-container)
"environmentVariables": **Optional** A map of key value env's to set
"mountVolumes": **Required** an array of mounts to mount into the container, same fields as above
"sourceVolumePath": **Required** The source path to the volume to be mounted into the docker container
"targetVolumePath": **Required** The target path to the volume to be mounted into the docker container
"readOnly": false **Required** whether or not the mount should be read only
"registry" **Optional** docker registry credentials to use when using a private container registry
"username": **Optional** the username
"password": **Optional** the password
"serverUrl": **Optional** the registry url
"portMappings": **Optional** an array of source:target ports to map into the container
```
</details>
<details>
<summary>Example Output</summary>
<br>
```
{
"state":
{
"network": "github_network_53269bd575974817b43f4733536b200c",
"jobContainer" : "82e8219701fe096a35941d869cf8d71af1d943b5d3bdd718850fb87ac3042480",
"serviceContainers":
{
"redis": "60972d9aa486605e66b0dad4abb638dc3d9116f566579e418166eedb8abb9105"
}
},
"context"
{
"container" :
{
"id": "82e8219701fe096a35941d869cf8d71af1d943b5d3bdd718850fb87ac3042480"
"network": "github_network_53269bd575974817b43f4733536b200c"
}
"services": {
"redis": {
"id": "60972d9aa486605e66b0dad4abb638dc3d9116f566579e418166eedb8abb9105",
"ports": {
"8080": "8080"
},
"network": "github_network_53269bd575974817b43f4733536b200c"
}
}
"alpine: true,
}
```
</details>
### Cleanup Job
The `cleanup_job` hook is called at the end of a job and expects you to:
- Stop any running service or job containers (or the equivalent pod)
- Stop the network (if one exists)
- Delete any job or service containers (or the equivalent pod)
- Delete the network (if one exists)
- Cleanup anything else that was created for the run
Its input looks like
<details>
<summary>Example Input</summary>
<br>
```
"command": "cleanup_job",
"responseFile": null,
"state":
{
"network": "github_network_53269bd575974817b43f4733536b200c",
"jobContainer" : "82e8219701fe096a35941d869cf8d71af1d943b5d3bdd718850fb87ac3042480",
"serviceContainers":
{
"redis": "60972d9aa486605e66b0dad4abb638dc3d9116f566579e418166eedb8abb9105"
}
}
"args": {}
```
</details>
No args are provided.
No output is expected.
### Run Container Step
The `run_container_step` hook is called once per container action in your job and expects you to:
- Pull or build the required container (or fail if you cannot)
- Run the container action and return the exit code of the container
- Stream any step logs output to stdout and stderr
- Cleanup the container after it executes
<details>
<summary>Example Input for Image</summary>
<br>
```
"command": "run_container_step",
"responseFile": null,
"state":
{
"network": "github_network_53269bd575974817b43f4733536b200c",
"jobContainer" : "82e8219701fe096a35941d869cf8d71af1d943b5d3bdd718850fb87ac3042480",
"serviceContainers":
{
"redis": "60972d9aa486605e66b0dad4abb638dc3d9116f566579e418166eedb8abb9105"
}
},
"args":
{
"image": "node:14.16",
"dockerfile": null,
"entryPointArgs": ["-f", "/dev/null"],
"entryPoint": "tail",
"workingDirectory": "/__w/thboop-test2/thboop-test2",
"createOptions": "--cpus 1",
"environmentVariables": {
"NODE_ENV": "development"
},
"prependPath":["/foo/bar", "bar/foo"]
"userMountVolumes:[
{
"sourceVolumePath": "my_docker_volume",
"targetVolumePath": "/volume_mount",
"readOnly": false
},
],
"mountVolumes": [
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work",
"targetVolumePath": "/__w",
"readOnly": false
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/externals",
"targetVolumePath": "/__e",
"readOnly": true
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work/_temp",
"targetVolumePath": "/__w/_temp",
"readOnly": false
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work/_actions",
"targetVolumePath": "/__w/_actions",
"readOnly": false
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work/_tool",
"targetVolumePath": "/__w/_tool",
"readOnly": false
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work/_temp/_github_home",
"targetVolumePath": "/github/home",
"readOnly": false
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work/_temp/_github_workflow",
"targetVolumePath": "/github/workflow",
"readOnly": false
}
],
"registry": null,
"portMappings": { "80": "801" }
}
}
```
</details>
<details>
<summary>Example Input for dockerfile</summary>
<br>
```
"command": "run_container_step",
"responseFile": null,
"state":
{
"network": "github_network_53269bd575974817b43f4733536b200c",
"jobContainer" : "82e8219701fe096a35941d869cf8d71af1d943b5d3bdd718850fb87ac3042480",
"services":
{
"redis": "60972d9aa486605e66b0dad4abb638dc3d9116f566579e418166eedb8abb9105"
}
},
"args":
{
"image": null,
"dockerfile": /__w/_actions/foo/dockerfile,
"entryPointArgs": ["hello world"],
"entryPoint": "echo",
"workingDirectory": "/__w/thboop-test2/thboop-test2",
"createOptions": "--cpus 1",
"environmentVariables": {
"NODE_ENV": "development"
},
"prependPath":["/foo/bar", "bar/foo"]
"userMountVolumes:[
{
"sourceVolumePath": "my_docker_volume",
"targetVolumePath": "/volume_mount",
"readOnly": false
},
],
"mountVolumes": [
{
"sourceVolumePath": "my_docker_volume",
"targetVolumePath": "/volume_mount",
"readOnly": false
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work",
"targetVolumePath": "/__w",
"readOnly": false
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/externals",
"targetVolumePath": "/__e",
"readOnly": true
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work/_temp",
"targetVolumePath": "/__w/_temp",
"readOnly": false
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work/_actions",
"targetVolumePath": "/__w/_actions",
"readOnly": false
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work/_tool",
"targetVolumePath": "/__w/_tool",
"readOnly": false
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work/_temp/_github_home",
"targetVolumePath": "/github/home",
"readOnly": false
},
{
"sourceVolumePath": "/home/thomas/git/runner/_layout/_work/_temp/_github_workflow",
"targetVolumePath": "/github/workflow",
"readOnly": false
}
],
"registry": null,
"portMappings": [ "8080:80/tcp", "8080:80/udp" ]
}
}
```
</details>
<details>
<summary>Field Descriptions</summary>
<br>
```
Arg Fields:
"image": **Optional** A string containing the docker image. Otherwise a dockerfile must be provided
"dockerfile": **Optional** A string containing the path to the dockerfile, otherwise an image must be provided
"entryPointArgs": **Optional** A list containing the entry point args
"entryPoint": **Optional** The container entry point to use if the default image entrypoint should be overwritten
"workingDirectory": **Required** A string containing the absolute path of the working directory
"createOptions": **Optional** The optional create options specified in the [YAML](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container#example-running-a-job-within-a-container)
"environmentVariables": **Optional** A map of key value env's to set
"prependPath": **Optional** an array of additional paths to prepend to the $PATH variable
"userMountVolumes: ** Optional** an array of user mount volumes set in the [YAML](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container#example-running-a-job-within-a-container)
"sourceVolumePath": **Required** The source path to the volume to be mounted into the docker container
"targetVolumePath": **Required** The target path to the volume to be mounted into the docker container
"readOnly": false **Required** whether or not the mount should be read only
"mountVolumes": **Required** an array of mounts to mount into the container, same fields as above
"sourceVolumePath": **Required** The source path to the volume to be mounted into the docker container
"targetVolumePath": **Required** The target path to the volume to be mounted into the docker container
"readOnly": false **Required** whether or not the mount should be read only
"registry" **Optional** docker registry credentials to use when using a private container registry
"username": **Optional** the username
"password": **Optional** the password
"serverUrl": **Optional** the registry url
"portMappings": **Optional** an array of source:target ports to map into the container
```
</details>
No output is expected
Currently, we build all container actions at the start of the job. By doing it during the hook, we move to just-in-time building for hooks. We could expose a hook to build/pull a container action and have it called at the start of a job, but doing so would require hook authors to track the built containers in the state, which could be painful.
### Run Script Step
The `run_script_step` expects you to:
- Invoke the provided script inside the job container and return the exit code
- Stream any step log output to stdout and stderr
<details>
<summary>Example Input</summary>
<br>
```
"command": "run_script_step",
"responseFile": null,
"state":
{
"network": "github_network_53269bd575974817b43f4733536b200c",
"jobContainer" : "82e8219701fe096a35941d869cf8d71af1d943b5d3bdd718850fb87ac3042480",
"serviceContainers":
{
"redis": "60972d9aa486605e66b0dad4abb638dc3d9116f566579e418166eedb8abb9105"
}
},
"args":
{
"entryPointArgs": ["-e", "/runner/temp/abc123.sh"],
"entryPoint": "bash",
"environmentVariables": {
"NODE_ENV": "development"
},
"prependPath": ["/foo/bar", "bar/foo"],
"workingDirectory": "/__w/thboop-test2/thboop-test2"
}
}
```
</details>
<details>
<summary>Field Descriptions</summary>
<br>
```
Arg Fields:
"entryPointArgs": **Optional** A list containing the entry point args
"entryPoint": **Optional** The container entry point to use if the default image entrypoint should be overwritten
"prependPath": **Optional** an array of additional paths to prepend to the $PATH variable
"workingDirectory": **Required** A string containing the absolute path of the working directory
"environmentVariables": **Optional** A map of key value env's to set
```
</details>
No output is expected
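To get a feel for the protocol, here is a rough, hypothetical handler for just this command written as a shell script (the entrypoint described above is a `.js` file; this bash sketch only illustrates the stdin payload, log streaming, and exit-code contract, and assumes `docker` and `jq` are available):
```bash
#!/usr/bin/env bash
set -euo pipefail

payload=$(cat)                                            # hook input arrives on stdin
container=$(jq -r '.state.jobContainer' <<<"$payload")    # container id saved by prepare_job
entry=$(jq -r '.args.entryPoint' <<<"$payload")
workdir=$(jq -r '.args.workingDirectory' <<<"$payload")
mapfile -t entry_args < <(jq -r '.args.entryPointArgs[]' <<<"$payload")

# (environmentVariables and prependPath from args are omitted for brevity.)
# Stream output straight through so it shows up in the step logs,
# and let the container's exit code become the hook's exit code.
exec docker exec -w "$workdir" "$container" "$entry" "${entry_args[@]}"
```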
## Limitations
- We will only support Linux at launch
- Hooks are set by the runner admin, and thus are only supported on self-hosted runners
## Consequences
- We support non docker scenarios for self hosted runners and allow customers to customize their docker invocations
- We ship/maintain docs on docker hooks and an open source repo with examples
- We support these hooks and add enough telemetry to be able to troubleshoot support issues as they come in.

docs/automate.md
# Automate Configuring Self-Hosted Runners
## Export PAT
Before running any of these sample scripts, create a GitHub PAT and export it:
```bash
export RUNNER_CFG_PAT=yourPAT
```
## Create running as a service
**Scenario**: Run on a machine or VM ([not container](#why-cant-i-use-a-container)) which automates:
- Resolving latest released runner
- Download and extract latest
- Acquire a registration token
- Configure the runner
- Run as a systemd (linux) or Launchd (osx) service
:point_right: [Sample script here](../scripts/create-latest-svc.sh) :point_left:
Run as a one-liner. NOTE: replace with yourorg/yourrepo (repo level) or just yourorg (org level)
```bash
curl -s https://raw.githubusercontent.com/actions/runner/main/scripts/create-latest-svc.sh | bash -s yourorg/yourrepo
```
You can call the script with additional arguments:
```bash
# Usage:
# export RUNNER_CFG_PAT=<yourPAT>
# ./create-latest-svc -s scope -g [ghe_domain] -n [name] -u [user] -l [labels]
# -s required scope: repo (:owner/:repo) or org (:organization)
# -g optional ghe_hostname: the fully qualified domain name of your GitHub Enterprise Server deployment
# -n optional name of the runner, defaults to hostname
# -u optional user svc will run as, defaults to current
# -l optional list of labels (split by comma) applied on the runner
```
Use `--` to pass any number of optional named parameters:
```
curl -s https://raw.githubusercontent.com/actions/runner/main/scripts/create-latest-svc.sh | bash -s -- -s myorg/myrepo -n myname -l label1,label2
```
### Why can't I use a container?
The runner is installed as a service using `systemd` and `systemctl`. Docker does not support `systemd` for service configuration on a container.
## Uninstall running as service
**Scenario**: Run on a machine or VM ([not container](#why-cant-i-use-a-container)) which automates:
- Stops and uninstalls the systemd (linux) or Launchd (osx) service
- Acquires a removal token
- Removes the runner
:point_right: [Sample script here](../scripts/remove-svc.sh) :point_left:
Repo level one liner. NOTE: replace with yourorg/yourrepo (repo level) or just yourorg (org level)
```bash
curl -s https://raw.githubusercontent.com/actions/runner/main/scripts/remove-svc.sh | bash -s yourorg/yourrepo
```
### Delete an offline runner
**Scenario**: Deletes a registered runner that is offline:
- Ensures the runner is offline
- Resolves id from name
- Deletes the runner
:point_right: [Sample script here](../scripts/delete.sh) :point_left:
Repo level one-liner. NOTE: replace with yourorg/yourrepo (repo level) or just yourorg (org level) and replace runnername
```bash
curl -s https://raw.githubusercontent.com/actions/runner/main/scripts/delete.sh | bash -s yourorg/yourrepo runnername
```

docs/checks/actions.md
# Actions Connection Check
## What is this check for?
Make sure the runner has access to the Actions service for GitHub.com or GitHub Enterprise Server.
- For GitHub.com
- The runner needs to access `https://api.github.com` for downloading actions.
- The runner needs to access `https://vstoken.actions.githubusercontent.com/_apis/.../` for requesting an access token.
- The runner needs to access `https://pipelines.actions.githubusercontent.com/_apis/.../` for receiving workflow jobs.
These can be tested by running the following `curl` commands from your self-hosted runner machine:
```
curl -v https://api.github.com/api/v3/zen
curl -v https://vstoken.actions.githubusercontent.com/_apis/health
curl -v https://pipelines.actions.githubusercontent.com/_apis/health
```
- For GitHub Enterprise Server
- The runner needs to access `https://[hostname]/api/v3` for downloading actions.
- The runner needs to access `https://[hostname]/_services/vstoken/_apis/.../` for requesting an access token.
- The runner needs to access `https://[hostname]/_services/pipelines/_apis/.../` for receiving workflow jobs.
These can be tested by running the following `curl` commands from your self-hosted runner machine, replacing `[hostname]` with the hostname of your appliance, for instance `github.example.com`:
```
curl -v https://[hostname]/api/v3/zen
curl -v https://[hostname]/_services/vstoken/_apis/health
curl -v https://[hostname]/_services/pipelines/_apis/health
```
A common cause of these connectivity issues is your GitHub Enterprise Server appliance using [the self-signed certificate that is enabled the first time](https://docs.github.com/en/enterprise-server/admin/configuration/configuring-network-settings/configuring-tls) your appliance is started. As self-signed certificates are not trusted by web browsers and Git clients, these clients (including the GitHub Actions runner) will report certificate warnings.
We recommend [uploading a certificate signed by a trusted authority](https://docs.github.com/en/enterprise-server/admin/configuration/configuring-network-settings/configuring-tls) to GitHub Enterprise Server, or enabling the built-in [Let's Encrypt support](https://docs.github.com/en/enterprise-server/admin/configuration/configuring-network-settings/configuring-tls).
## What is checked?
- DNS lookup for api.github.com or myGHES.com using dotnet
- Ping api.github.com or myGHES.com using dotnet
- Make HTTP GET to https://api.github.com or https://myGHES.com/api/v3 using dotnet, check response headers contains `X-GitHub-Request-Id`
---
- DNS lookup for vstoken.actions.githubusercontent.com using dotnet
- Ping vstoken.actions.githubusercontent.com using dotnet
- Make HTTP GET to https://vstoken.actions.githubusercontent.com/_apis/health or https://myGHES.com/_services/vstoken/_apis/health using dotnet, check response headers contains `x-vss-e2eid`
---
- DNS lookup for pipelines.actions.githubusercontent.com using dotnet
- Ping pipelines.actions.githubusercontent.com using dotnet
- Make HTTP GET to https://pipelines.actions.githubusercontent.com/_apis/health or https://myGHES.com/_services/pipelines/_apis/health using dotnet, check response headers contains `x-vss-e2eid`
- Make HTTP POST to https://pipelines.actions.githubusercontent.com/_apis/health or https://myGHES.com/_services/pipelines/_apis/health using dotnet, check response headers contains `x-vss-e2eid`
## How to fix the issue?
### 1. Check the common network issue
> Please check the [network doc](./network.md)
### 2. SSL certificate related issue
If you are seeing `System.Net.Http.HttpRequestException: The SSL connection could not be established, see inner exception.` in the log, it means the runner can't connect to Actions service due to SSL handshake failure.
> Please check the [SSL cert doc](./sslcert.md)
## Still not working?
Contact [GitHub Support](https://support.github.com) if you have further questions, or log an issue at https://github.com/actions/runner if you think it's a runner issue.

docs/checks/git.md
# Git Connection Check
## What is this check for?
Make sure `git` can access GitHub.com or your GitHub Enterprise Server.
## What is checked?
The test is done by executing
```bash
# For GitHub.com
git ls-remote --exit-code https://github.com/actions/checkout HEAD
# For GitHub Enterprise Server
git ls-remote --exit-code https://ghes.me/actions/checkout HEAD
```
The test also sets the environment variables `GIT_TRACE=1` and `GIT_CURL_VERBOSE=1` before running `git ls-remote`; this makes `git` produce debug logs that help in debugging any potential issues.
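You can reproduce the same verbose check manually from the runner machine, for example:
```bash
# Produces verbose trace output from git and curl while contacting GitHub.com.
GIT_TRACE=1 GIT_CURL_VERBOSE=1 git ls-remote --exit-code https://github.com/actions/checkout HEAD
```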
## How to fix the issue?
### 1. Check the common network issue
> Please check the [network doc](./network.md)
### 2. SSL certificate related issue
If you are seeing `SSL Certificate problem:` in the log, it means `git` can't connect to the GitHub server due to an SSL handshake failure.
> Please check the [SSL cert doc](./sslcert.md)
## Still not working?
Contact GitHub customer service or log an issue at https://github.com/actions/runner if you think it's a runner issue.

docs/checks/internet.md Normal file

@@ -0,0 +1,26 @@
# Internet Connection Check
## What is this check for?
Make sure the runner has access to https://api.github.com
The runner needs to access https://api.github.com to download any actions from the marketplace.
Even if the runner is configured against GitHub Enterprise Server, it can still download actions from GitHub.com with [GitHub Connect](https://docs.github.com/en/enterprise-server@2.22/admin/github-actions/enabling-automatic-access-to-githubcom-actions-using-github-connect)
## What is checked?
- DNS lookup for api.github.com using dotnet
- Ping api.github.com using dotnet
- Make an HTTP GET request to https://api.github.com using dotnet, and check that the response headers contain `X-GitHub-Request-Id`
## How to fix the issue?
### 1. Check the common network issue
> Please check the [network doc](./network.md)
## Still not working?
Contact GitHub customer service or log an issue at https://github.com/actions/runner if you think it's a runner issue.

docs/checks/network.md Normal file

@@ -0,0 +1,62 @@
## Common Network Related Issues
### Common things that can cause the runner to not work properly
- A bug in the runner or the dotnet framework that causes the actions runner to be unable to make HTTP requests in certain network environments.
- A proxy or firewall may block certain HTTP methods, such as blocking all POST and PUT calls, which the runner uses to upload logs.
- A proxy or firewall may only allow requests with certain user agents to pass through, and the actions runner's user agent is not on the allow list.
- A proxy may try to decrypt and examine HTTPS traffic for security purposes, but cause the actions runner to fail the SSL handshake because the proxy's CA is not trusted.
- The SSL handshake may fail if the client and server do not support the same TLS version or the same cipher suites.
- A proxy may try to modify the HTTPS request (e.g. add or change some HTTP headers), causing the request to become incompatible with the Actions service (ASP.NET Core), ex: [Nginx](https://github.com/dotnet/aspnetcore/issues/17081)
- Firewall rules that block the actions runner from accessing certain hosts, ex: `*.github.com`, `*.actions.githubusercontent.com`, etc.
### Identify and solve these problems
The key is to figure out where the problem is: in the network environment, or in the actions runner?
Using a third-party tool to make the same requests as the runner does is a good starting point.
- Use `nslookup` to check DNS
- Use `ping` to check Ping
- Use `traceroute`, `tracepath`, or `tracert` to check the network route between the runner and the Actions service
- Use `curl -v` to check the network stack, good for verifying default certificate/proxy settings.
- Use `Invoke-WebRequest` from `pwsh` (`PowerShell Core`) to check the dotnet network stack, good for verifying bugs in the dotnet framework.
If the third-party tool experiences the same error as the runner does, then you might want to contact your network administrator for help.
Otherwise, contact GitHub customer support or log an issue at https://github.com/actions/runner
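For example, a rough sequence against GitHub.com (replace the host with your GHES hostname as needed; exact commands vary by OS) might look like:
```bash
nslookup api.github.com                                    # DNS resolution
ping -c 4 api.github.com                                   # ping (ICMP may be blocked on some networks)
traceroute api.github.com                                  # network route (tracert on Windows)
curl -v https://api.github.com                             # OS network stack, default cert/proxy settings
pwsh -Command "Invoke-WebRequest https://api.github.com"   # dotnet network stack
```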
### Troubleshooting: Why can't I configure a runner?
If you are having trouble connecting, try these steps:
1. Validate you can reach our endpoints from your web browser. If not, double check your local network connection
- For hosted GitHub:
- https://api.github.com/
- https://vstoken.actions.githubusercontent.com/_apis/health
- https://pipelines.actions.githubusercontent.com/_apis/health
- For GHES/GHAE
- https://myGHES.com/_services/vstoken/_apis/health
- https://myGHES.com/_services/pipelines/_apis/health
- https://myGHES.com/api/v3
2. Validate you can reach those endpoints in PowerShell Core
- The runner runs on .NET Core, so let's validate the local settings for that stack
- Open up `pwsh`
- Run the command using the urls above `Invoke-WebRequest {url}`
3. If not, get a packet trace using a tool like Wireshark and start looking at the TLS handshake (see the `openssl s_client` sketch after this list for a quicker check).
- If you see a Client Hello followed by a Server RST:
- You may need to configure your TLS settings to use the correct version
- You should support TLS version 1.2 or later
- You may need to configure your TLS settings to have up-to-date cipher suites; this may be solved by system updates and patches.
- Most notably, on Windows Server 2012 make sure [the TLS cipher suite update](https://support.microsoft.com/en-us/topic/update-adds-new-tls-cipher-suites-and-changes-cipher-suite-priorities-in-windows-8-1-and-windows-server-2012-r2-8e395e43-c8ef-27d8-b60c-0fc57d526d94) is installed
- Your firewall, proxy or network configuration may be blocking the connection
- You will want to reach out to whoever is in charge of your network with these pcap files to further troubleshoot
- If you see a failure later in the handshake:
- Try the fix in the [SSLCert Fix](./sslcert.md)
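As a quicker alternative to a full packet capture, `openssl s_client` can show which TLS version and cipher suite are negotiated (a sketch using the hosted endpoints; point it at your GHES hostname if applicable):
```bash
# Show the negotiated protocol and cipher
openssl s_client -connect pipelines.actions.githubusercontent.com:443 </dev/null 2>/dev/null | grep -E 'Protocol|Cipher'
# Force TLS 1.2 to confirm it is supported end to end
openssl s_client -connect pipelines.actions.githubusercontent.com:443 -tls1_2 </dev/null
```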

docs/checks/nodejs.md Normal file

@@ -0,0 +1,30 @@
# Node.js Connection Check
## What is this check for?
Make sure the built-in node.js has access to GitHub.com or GitHub Enterprise Server.
The runner carries its own copy of the node.js executable under `<runner_root>/externals/node16/`.
All JavaScript-based actions get executed by the built-in `node` at `<runner_root>/externals/node16/`.
> Not the `node` from `$PATH`
## What is checked?
- Make an HTTPS GET request to https://api.github.com or https://myGHES.com/api/v3 using node.js, and make sure it gets a 200 response code.
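A hand-run approximation of this check, using the runner's bundled node on Linux/macOS (the path and URL are illustrative; use `https://myGHES.com/api/v3` on GitHub Enterprise Server):
```bash
# Run from the runner root folder
./externals/node16/bin/node -e "
const https = require('https');
https.get('https://api.github.com', { headers: { 'User-Agent': 'actions-runner-check' } }, (res) => {
  console.log('HTTP status:', res.statusCode);  // expect 200
  res.resume();
}).on('error', (err) => { console.error(err); process.exit(1); });
"
```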
## How to fix the issue?
### 1. Check the common network issue
> Please check the [network doc](./network.md)
### 2. SSL certificate related issue
If you are seeing `Https request failed due to SSL cert issue` in the log, it means node.js can't connect to the GitHub server due to an SSL handshake failure.
> Please check the [SSL cert doc](./sslcert.md)
## Still not working?
Contact GitHub customer service or log an issue at https://github.com/actions/runner if you think it's a runner issue.

docs/checks/sslcert.md Normal file

@@ -0,0 +1,89 @@
## SSL Certificate Related Issues
You might run into an SSL certificate error when your GitHub Enterprise Server is using a self-signed SSL server certificate or a web proxy within your network is decrypting HTTPS traffic for a security audit.
As long as your certificate is generated properly, most of the issues should be fixed once you trust the certificate properly on the runner machine.
> Different OSes might have extra requirements on SSL certificates.
> Ex: macOS requires `ExtendedKeyUsage` https://support.apple.com/en-us/HT210176
### Don't skip SSL cert validation
> !!! DO NOT SKIP SSL CERT VALIDATION !!!
> !!! IT IS A BAD SECURITY PRACTICE !!!
### Download SSL certificate chain
Depending on how your SSL server certificate is configured, you might need to download the whole certificate chain from a machine that has trusted the SSL certificate's CA.
- Approach 1: Download the certificate chain using a browser (Chrome, Firefox, etc.); you can google for more examples, [here is what I found](https://medium.com/@menakajain/export-download-ssl-certificate-from-server-site-url-bcfc41ea46a2)
- Approach 2: Download the certificate chain using OpenSSL (see the sketch after this list); you can google for more examples, [here is what I found](https://superuser.com/a/176721)
- Approach 3: Ask your network administrator or the owner of the CA certificate to send you a copy of it
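For approach 2, a typical way to capture the chain the server presents looks like this (a sketch; adjust the hostname, and note some servers do not send the root CA itself):
```bash
# Save every certificate presented by the server into chain.pem
openssl s_client -connect myghes.example.com:443 -showcerts </dev/null 2>/dev/null \
  | sed -n '/-----BEGIN CERTIFICATE-----/,/-----END CERTIFICATE-----/p' > chain.pem
```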
### Trust CA certificate for the Runner
The actions runner is a dotnet core application, so it follows how dotnet loads SSL CA certificates on each OS.
You can find the full documentation [here](https://docs.microsoft.com/en-us/dotnet/standard/security/cross-platform-cryptography#x509store)
In short:
- Windows: Load from Windows certificate store.
- Linux: Load from OpenSSL CA cert bundle.
- macOS: Load from macOS KeyChain.
To let the runner trust your CA certificate, you will need to:
1. Save your SSL certificate chain which includes the root CA and all intermediate CAs into a `.pem` file.
2. Use `OpenSSL` to convert the `.pem` file to the proper format for each OS; here is a [doc with sample commands](https://www.sslshopper.com/ssl-converter.html)
3. Trust CA on different OS:
- Windows: https://docs.microsoft.com/en-us/skype-sdk/sdn/articles/installing-the-trusted-root-certificate
- macOS: ![trust ca cert](./../res/macOStrustCA.gif)
- Linux: Refer to the distribution documentation
1. RedHat: https://www.redhat.com/sysadmin/ca-certificates-cli
2. Ubuntu: http://manpages.ubuntu.com/manpages/focal/man8/update-ca-certificates.8.html
3. Google search: "trust ca certificate on [linux distribution]"
4. If all approaches fail, set the environment variable `SSL_CERT_FILE` to the CA bundle `.pem` file from step 1.
> To verify the cert is installed properly on Linux, you can try `curl -v https://sitewithsslissue.com` and `pwsh -Command \"Invoke-WebRequest -Uri https://sitewithsslissue.com\"`
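As a last-resort example on Linux, assuming the chain was saved to `/extra/ca/chain.pem` (the path is illustrative):
```bash
# Point dotnet (and therefore the runner) at the extra CA bundle, then verify
export SSL_CERT_FILE=/extra/ca/chain.pem
curl -v https://sitewithsslissue.com > /dev/null
pwsh -Command "Invoke-WebRequest -Uri https://sitewithsslissue.com"
```
If the runner is installed as a service, the variable also has to be visible to the service process (for example via the `.env` file in the runner folder), not just your interactive shell.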
### Trust CA certificate for Git CLI
Git uses different CA bundle files depending on your operating system.
- On Windows, Git packages the CA bundle file within the Git installation
- On Linux and macOS, Git uses the OpenSSL CA certificate bundle file
You can check which CA file Git reads by running:
```bash
export GIT_CURL_VERBOSE=1
git ls-remote https://github.com/actions/runner HEAD
```
You should see something like:
```
* Couldn't find host github.com in the .netrc file; using defaults
* Trying 140.82.114.4...
* TCP_NODELAY set
* Connected to github.com (140.82.114.4) port 443 (#0)
* ALPN, offering h2
* ALPN, offering http/1.1
* successfully set certificate verify locations:
* CAfile: /etc/ssl/cert.pem
CApath: none
* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256
```
This tells me `/etc/ssl/cert.pem` is where it reads trusted CA certificates.
To let Git trust your CA certificate, you will need to:
1. Save your SSL certificate chain which includes the root CA and all intermediate CAs into a `.pem` file.
2. Set the `http.sslCAInfo` Git config or the `GIT_SSL_CAINFO` environment variable to the full path of the `.pem` file ([Git Doc](https://git-scm.com/docs/git-config#Documentation/git-config.txt-httpsslCAInfo))
> I would recommend using `http.sslCAInfo` since it can be scoped to certain hosts that need the extra trusted CA.
> Ex: `git config --global http.https://myghes.com/.sslCAInfo /extra/ca/cert.pem`
> This will make Git use `/extra/ca/cert.pem` only when communicating with `https://myghes.com` and keep using the default CA bundle for other hosts.
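Putting the two options from step 2 side by side (the path and hostname are placeholders):
```bash
# Option A: environment variable, applies to every remote in this shell
export GIT_SSL_CAINFO=/extra/ca/cert.pem

# Option B: config scoped to one host, used only when talking to https://myghes.com
git config --global http.https://myghes.com/.sslCAInfo /extra/ca/cert.pem
```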
### Trust CA certificate for Node.js
Node.js compiles in a snapshot of the Mozilla CA store that is fixed at the release time of each Node.js version.
To let Node.js trust your CA certificate, you will need to:
1. Save your SSL certificate chain which includes the root CA and all intermediate CAs into a `.pem` file.
2. Set the environment variable `NODE_EXTRA_CA_CERTS` to point to the file, ex: `export NODE_EXTRA_CA_CERTS=/full/path/to/cacert.pem` or `set NODE_EXTRA_CA_CERTS=C:\full\path\to\cacert.pem`


@@ -14,16 +14,43 @@ Issues in this repository should be for the runner application. Note that the V
We ask that before significant effort is put into code changes, that we have agreement on taking the change before time is invested in code changes.
1. Create a feature request. Once agreed we will take the enhancment
1. Create a feature request. Once agreed we will take the enhancement
2. Create an ADR to agree on the details of the change.
An ADR is an Architectural Decision Record. This allows consensus on the direction forward and also serves as a record of the change and motivation. [Read more here](adrs/README.md)
## Required Dev Dependencies
![Win](res/win_sm.png) ![*nix](res/linux_sm.png) Git for Windows and Linux [Install Here](https://git-scm.com/downloads) (needed for dev sh script)
![*nix](res/linux_sm.png) cURL [Install here](https://curl.se/download.html) (needed for external sh script)
![Win](res/win_sm.png) Visual Studio 2017 or newer [Install here](https://visualstudio.microsoft.com) (needed for dev sh script)
## Quickstart: Run a job from a real repository
If you just want to get from building the source code to using it to execute an action, you will need:
- The url of your repository
- A runner registration token. You can find it at `https://github.com/{your-repo}/settings/actions/runners/new`
```bash
git clone https://github.com/actions/runner
cd runner/src
./dev.(sh/cmd) layout # the runner that built from source is in {root}/_layout
cd ../_layout
./config.(sh/cmd) --url https://github.com/{your-repo} --token ABCABCABCABCABCABCABCABCABCAB # accept default name, labels and work folder
./run.(sh/cmd)
```
If you trigger a job now, you can see the runner execute it.
Tip: Make sure your job can run on this runner. The easiest way is to set `runs-on: self-hosted` in the workflow file.
## Development Life Cycle
### Required Dev Dependencies
![Win](res/win_sm.png) Git for Windows [Install Here](https://git-scm.com/downloads) (needed for dev sh script)
If you're using VS Code, you can follow [these](contribute/vscode.md) steps instead.
### To Build, Test, Layout
@@ -39,10 +66,11 @@ Navigate to the `src` directory and run the following command:
* `build` (`b`): Build everything and update runner layout folder
* `test` (`t`): Build runner binaries and run unit tests
Sample developer flow:
### Sample developer flow:
```bash
git clone https://github.com/actions/runner
cd runner
cd ./src
./dev.(sh/cmd) layout # the runner that built from source is in {root}/_layout
<make code changes>
@@ -50,12 +78,81 @@ cd ./src
./dev.(sh/cmd) test # run all unit tests before git commit/push
```
### Editors
Let's break that down.
### Clone repository:
```bash
git clone https://github.com/actions/runner
cd runner
```
If you want to push your changes to a remote, it is recommended you fork the repository and use that fork as your origin instead of `https://github.com/actions/runner`.
### Build Layout:
This command builds all projects, then copies them and other dependencies into a folder called `_layout`. The binaries in this folder are then used for running and debugging the runner.
```bash
cd ./src # execute the script from this folder
./dev.(sh/cmd) layout # the runner that built from source is in {root}/_layout
```
If you make code changes after this point, use the argument `build` to build your code in the `src` folder to keep your `_layout` folder up to date.
```bash
cd ./src
./dev.(sh/cmd) build # {root}/_layout will get updated
```
### Test Layout:
This command runs the suite of unit tests in the project
```bash
cd ./src
./dev.(sh/cmd) test # run all unit tests before git commit/push
```
### Configure Runner:
If you want to manually test your runner and run actions from a real repository, you'll have to configure it before running it.
```bash
cd runner/_layout
./config.(sh/cmd) # configure your custom runner
```
You will need the name of your repository and a runner registration token.
Check the [Quickstart](#quickstart-run-a-job-from-a-real-repository) if you don't know how to get this token.
These can also be passed as arguments to `config.(sh/cmd)`:
```bash
cd runner/_layout
./config.(sh/cmd) --url https://github.com/{your-repo} --token ABCABCABCABCABCABCABCABCABCAB
```
### Run Runner
All that's left to do is to start the runner:
```bash
cd runner/_layout
./run.(sh/cmd) # run your custom runner
```
### View logs:
```bash
cd runner/_layout/_diag
ls
cat (Runner/Worker)_TIMESTAMP.log # view your log file
```
## Editors
[Using Visual Studio Code](https://code.visualstudio.com/)
[Using Visual Studio 2019](https://www.visualstudio.com/vs/)
[Using Visual Studio](https://code.visualstudio.com/docs)
### Styling
## Styling
We use the .NET Foundation and CoreCLR style guidelines [located here](
https://github.com/dotnet/corefx/blob/master/Documentation/coding-guidelines/coding-style.md)

docs/contribute/vscode.md Normal file

@@ -0,0 +1,52 @@
# Development Life Cycle using VS Code:
These examples use VS Code, but the idea should be similar across all IDEs as long as you attach to the same processes in the right folder.
## Configure
To successfully start the runner, you need to register it using a repository and a runner registration token.
Run `Configure` first to build the source code and set up the runner in `_layout`.
Once it's done creating `_layout`, it asks for the url of your repository and your token in the terminal.
Check [Quickstart](../contribute.md#quickstart-run-a-job-from-a-real-repository) if you don't know how to get this token.
## Debugging
Debugging the full lifecycle of a job can be tricky, because there are multiple processes involved.
All the configs below can be found in `.vscode/launch.json`.
## Debug the Listener
```json
{
"name": "Run [build]",
"type": "coreclr",
"request": "launch",
"preLaunchTask": "build runner layout", // use the config called "Run" to launch without rebuild
"program": "${workspaceFolder}/_layout/bin/Runner.Listener",
"args": [
"run" // run without args to print usage
],
"cwd": "${workspaceFolder}/src",
"console": "integratedTerminal",
"requireExactSource": false,
}
```
If you launch `Run` or `Run [build]`, it starts a process called `Runner.Listener`.
This process will receive any job queued on this repository if the job runs on matching labels (e.g. `runs-on: self-hosted`).
Once a job is received, the `Runner.Listener` starts a new `Runner.Worker` process.
Since this is a different process, you can't use the same debugger session to debug it.
Instead, a parallel debugging session has to be started, using a different launch config.
Luckily, VS Code supports multiple parallel debugging sessions.
## Debug the Worker
Because the worker process is usually started by the listener instead of an IDE, debugging it from start to finish can be tricky.
For this reason, `Runner.Worker` can be configured to wait for a debugger to be attached before it begins any actual work.
Set the environment variable `GITHUB_ACTIONS_RUNNER_ATTACH_DEBUGGER` to `true` or `1` to enable this wait.
All worker processes will now wait 20 seconds before they start working on their task.
This gives enough time to attach a debugger by running `Debug Worker`.
If for some reason you have multiple workers running, run the launch config `Attach` instead.
Select `Runner.Worker` from the running processes when VS Code prompts for it.
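For example, when starting the runner from a terminal rather than from a launch config (the variable name comes from above; the rest is a sketch):
```bash
cd runner/_layout
export GITHUB_ACTIONS_RUNNER_ATTACH_DEBUGGER=true   # or 1
./run.sh
# queue a job, then start "Debug Worker" (or "Attach") within the ~20 second wait
```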

Binary file changed, not shown (image, 158 KiB → 138 KiB)
docs/res/macOStrustCA.gif Normal file (new binary file, 14 MiB, not shown)


@@ -23,8 +23,8 @@ You might see something like this which indicate a dependency's missing.
./config.sh
libunwind.so.8 => not found
libunwind-x86_64.so.8 => not found
Dependencies is missing for Dotnet Core 3.0
Execute ./bin/installdependencies.sh to install any missing Dotnet Core 3.0 dependencies.
Dependencies is missing for Dotnet Core 6.0
Execute ./bin/installdependencies.sh to install any missing Dotnet Core 6.0 dependencies.
```
You can easily correct the problem by executing `./bin/installdependencies.sh`.
The `installdependencies.sh` script should install all required dependencies on all supported Linux versions
@@ -34,13 +34,13 @@ The `installdependencies.sh` script should install all required dependencies on
Debian based OS (Debian, Ubuntu, Linux Mint)
- liblttng-ust0
- liblttng-ust1 or liblttng-ust0
- libkrb5-3
- zlib1g
- libssl1.1, libssl1.0.2 or libssl1.0.0
- libicu63, libicu60, libicu57 or libicu55
Fedora based OS (Fedora, Redhat, Centos, Oracle Linux 7)
Fedora based OS (Fedora, Red Hat Enterprise Linux, CentOS, Oracle Linux 7)
- lttng-ust
- openssl-libs


@@ -5,6 +5,6 @@
## Supported Versions
- macOS High Sierra (10.13) and later versions
- x64 and arm64 (Apple Silicon)
## [More .Net Core Prerequisites Information](https://docs.microsoft.com/en-us/dotnet/core/macos-prerequisites?tabs=netcore30)


@@ -1,67 +1,112 @@
## Features
- Runner support for GHES Alpha (#381 #386 #390 #393 $401)
- Allow secrets context in Container.env (#388)
- Added support for a JIT runner config (#1925)
- Added `ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION` env option to force actions to run on a specific node version (#1913)
## Bugs
- Raise warning when volume mount root. (#413)
- Fix typo (#394)
- Fixed a bug where container hooks passed in path as a string rather then an array of strings (#1948)
## Misc
- N/A
- Minor cleanup of error messages when running container hooks (#1949)
## Windows x64
We recommend configuring the runner in a root folder of the Windows drive (e.g. "C:\actions-runner"). This will help avoid issues related to service identity folder permissions and long file path restrictions on Windows
```
// Create a folder under the drive root
We recommend configuring the runner in a root folder of the Windows drive (e.g. "C:\actions-runner"). This will help avoid issues related to service identity folder permissions and long file path restrictions on Windows.
The following snippet needs to be run in `powershell`:
``` powershell
# Create a folder under the drive root
mkdir \actions-runner ; cd \actions-runner
// Download the latest runner package
# Download the latest runner package
Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v<RUNNER_VERSION>/actions-runner-win-x64-<RUNNER_VERSION>.zip -OutFile actions-runner-win-x64-<RUNNER_VERSION>.zip
// Extract the installer
# Extract the installer
Add-Type -AssemblyName System.IO.Compression.FileSystem ;
[System.IO.Compression.ZipFile]::ExtractToDirectory("$PWD\actions-runner-win-x64-<RUNNER_VERSION>.zip", "$PWD")
```
## OSX
## OSX x64
``` bash
// Create a folder
# Create a folder
mkdir actions-runner && cd actions-runner
// Download the latest runner package
# Download the latest runner package
curl -O -L https://github.com/actions/runner/releases/download/v<RUNNER_VERSION>/actions-runner-osx-x64-<RUNNER_VERSION>.tar.gz
// Extract the installer
# Extract the installer
tar xzf ./actions-runner-osx-x64-<RUNNER_VERSION>.tar.gz
```
## [Pre-release] OSX arm64 (Apple silicon)
``` bash
# Create a folder
mkdir actions-runner && cd actions-runner
# Download the latest runner package
curl -O -L https://github.com/actions/runner/releases/download/v<RUNNER_VERSION>/actions-runner-osx-arm64-<RUNNER_VERSION>.tar.gz
# Extract the installer
tar xzf ./actions-runner-osx-arm64-<RUNNER_VERSION>.tar.gz
```
## Linux x64
``` bash
// Create a folder
# Create a folder
mkdir actions-runner && cd actions-runner
// Download the latest runner package
# Download the latest runner package
curl -O -L https://github.com/actions/runner/releases/download/v<RUNNER_VERSION>/actions-runner-linux-x64-<RUNNER_VERSION>.tar.gz
// Extract the installer
# Extract the installer
tar xzf ./actions-runner-linux-x64-<RUNNER_VERSION>.tar.gz
```
## Linux arm64 (Pre-release)
## Linux arm64
``` bash
// Create a folder
# Create a folder
mkdir actions-runner && cd actions-runner
// Download the latest runner package
# Download the latest runner package
curl -O -L https://github.com/actions/runner/releases/download/v<RUNNER_VERSION>/actions-runner-linux-arm64-<RUNNER_VERSION>.tar.gz
// Extract the installer
# Extract the installer
tar xzf ./actions-runner-linux-arm64-<RUNNER_VERSION>.tar.gz
```
## Linux arm (Pre-release)
## Linux arm
``` bash
// Create a folder
# Create a folder
mkdir actions-runner && cd actions-runner
// Download the latest runner package
# Download the latest runner package
curl -O -L https://github.com/actions/runner/releases/download/v<RUNNER_VERSION>/actions-runner-linux-arm-<RUNNER_VERSION>.tar.gz
// Extract the installer
# Extract the installer
tar xzf ./actions-runner-linux-arm-<RUNNER_VERSION>.tar.gz
```
## Using your self hosted runner
For additional details about configuring, running, or shutting down the runner please check out our [product docs.](https://help.github.com/en/actions/automating-your-workflow-with-github-actions/adding-self-hosted-runners)
## SHA-256 Checksums
The SHA-256 checksums for the packages included in this build are shown below:
- actions-runner-win-x64-<RUNNER_VERSION>.zip <!-- BEGIN SHA win-x64 --><WIN_X64_SHA><!-- END SHA win-x64 -->
- actions-runner-osx-x64-<RUNNER_VERSION>.tar.gz <!-- BEGIN SHA osx-x64 --><OSX_X64_SHA><!-- END SHA osx-x64 -->
- actions-runner-osx-arm64-<RUNNER_VERSION>.tar.gz <!-- BEGIN SHA osx-arm64 --><OSX_ARM64_SHA><!-- END SHA osx-arm64 -->
- actions-runner-linux-x64-<RUNNER_VERSION>.tar.gz <!-- BEGIN SHA linux-x64 --><LINUX_X64_SHA><!-- END SHA linux-x64 -->
- actions-runner-linux-arm64-<RUNNER_VERSION>.tar.gz <!-- BEGIN SHA linux-arm64 --><LINUX_ARM64_SHA><!-- END SHA linux-arm64 -->
- actions-runner-linux-arm-<RUNNER_VERSION>.tar.gz <!-- BEGIN SHA linux-arm --><LINUX_ARM_SHA><!-- END SHA linux-arm -->
- actions-runner-win-x64-<RUNNER_VERSION>-noexternals.zip <!-- BEGIN SHA win-x64_noexternals --><WIN_X64_SHA_NOEXTERNALS><!-- END SHA win-x64_noexternals -->
- actions-runner-osx-x64-<RUNNER_VERSION>-noexternals.tar.gz <!-- BEGIN SHA osx-x64_noexternals --><OSX_X64_SHA_NOEXTERNALS><!-- END SHA osx-x64_noexternals -->
- actions-runner-osx-arm64-<RUNNER_VERSION>-noexternals.tar.gz <!-- BEGIN SHA osx-arm64_noexternals --><OSX_ARM64_SHA_NOEXTERNALS><!-- END SHA osx-arm64_noexternals -->
- actions-runner-linux-x64-<RUNNER_VERSION>-noexternals.tar.gz <!-- BEGIN SHA linux-x64_noexternals --><LINUX_X64_SHA_NOEXTERNALS><!-- END SHA linux-x64_noexternals -->
- actions-runner-linux-arm64-<RUNNER_VERSION>-noexternals.tar.gz <!-- BEGIN SHA linux-arm64_noexternals --><LINUX_ARM64_SHA_NOEXTERNALS><!-- END SHA linux-arm64_noexternals -->
- actions-runner-linux-arm-<RUNNER_VERSION>-noexternals.tar.gz <!-- BEGIN SHA linux-arm_noexternals --><LINUX_ARM_SHA_NOEXTERNALS><!-- END SHA linux-arm_noexternals -->
- actions-runner-win-x64-<RUNNER_VERSION>-noruntime.zip <!-- BEGIN SHA win-x64_noruntime --><WIN_X64_SHA_NORUNTIME><!-- END SHA win-x64_noruntime -->
- actions-runner-osx-x64-<RUNNER_VERSION>-noruntime.tar.gz <!-- BEGIN SHA osx-x64_noruntime --><OSX_X64_SHA_NORUNTIME><!-- END SHA osx-x64_noruntime -->
- actions-runner-osx-arm64-<RUNNER_VERSION>-noruntime.tar.gz <!-- BEGIN SHA osx-arm64_noruntime --><OSX_ARM64_SHA_NORUNTIME><!-- END SHA osx-arm64_noruntime -->
- actions-runner-linux-x64-<RUNNER_VERSION>-noruntime.tar.gz <!-- BEGIN SHA linux-x64_noruntime --><LINUX_X64_SHA_NORUNTIME><!-- END SHA linux-x64_noruntime -->
- actions-runner-linux-arm64-<RUNNER_VERSION>-noruntime.tar.gz <!-- BEGIN SHA linux-arm64_noruntime --><LINUX_ARM64_SHA_NORUNTIME><!-- END SHA linux-arm64_noruntime -->
- actions-runner-linux-arm-<RUNNER_VERSION>-noruntime.tar.gz <!-- BEGIN SHA linux-arm_noruntime --><LINUX_ARM_SHA_NORUNTIME><!-- END SHA linux-arm_noruntime -->
- actions-runner-win-x64-<RUNNER_VERSION>-noruntime-noexternals.zip <!-- BEGIN SHA win-x64_noruntime_noexternals --><WIN_X64_SHA_NORUNTIME_NOEXTERNALS><!-- END SHA win-x64_noruntime_noexternals -->
- actions-runner-osx-x64-<RUNNER_VERSION>-noruntime-noexternals.tar.gz <!-- BEGIN SHA osx-x64_noruntime_noexternals --><OSX_X64_SHA_NORUNTIME_NOEXTERNALS><!-- END SHA osx-x64_noruntime_noexternals -->
- actions-runner-osx-arm64-<RUNNER_VERSION>-noruntime-noexternals.tar.gz <!-- BEGIN SHA osx-arm64_noruntime_noexternals --><OSX_ARM64_SHA_NORUNTIME_NOEXTERNALS><!-- END SHA osx-arm64_noruntime_noexternals -->
- actions-runner-linux-x64-<RUNNER_VERSION>-noruntime-noexternals.tar.gz <!-- BEGIN SHA linux-x64_noruntime_noexternals --><LINUX_X64_SHA_NORUNTIME_NOEXTERNALS><!-- END SHA linux-x64_noruntime_noexternals -->
- actions-runner-linux-arm64-<RUNNER_VERSION>-noruntime-noexternals.tar.gz <!-- BEGIN SHA linux-arm64_noruntime_noexternals --><LINUX_ARM64_SHA_NORUNTIME_NOEXTERNALS><!-- END SHA linux-arm64_noruntime_noexternals -->
- actions-runner-linux-arm-<RUNNER_VERSION>-noruntime-noexternals.tar.gz <!-- BEGIN SHA linux-arm_noruntime_noexternals --><LINUX_ARM_SHA_NORUNTIME_NOEXTERNALS><!-- END SHA linux-arm_noruntime_noexternals -->
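To verify a download against its published checksum on Linux or macOS, something like the following works (substitute the real file name and the checksum value from the list above):
```bash
# Prints "OK" when the archive matches the published SHA-256
echo "<LINUX_X64_SHA>  actions-runner-linux-x64-<RUNNER_VERSION>.tar.gz" | shasum -a 256 -c
```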


@@ -1 +1 @@
2.168.0
2.294.0

scripts/README.md Normal file

@@ -0,0 +1,4 @@
# Sample scripts for self-hosted runners
Here are some examples to work from if you'd like to automate your use of self-hosted runners.
See the docs [here](../docs/automate.md).

scripts/create-latest-svc.sh Executable file

@@ -0,0 +1,186 @@
#!/bin/bash
set -e
# Notes:
# PATS over envvars are more secure
# Downloads latest runner release (not pre-release)
# Configures it as a service more secure
# Should be used on VMs and not containers
# Works on OSX and Linux
# Assumes x64 arch
# See EXAMPLES below
flags_found=false
while getopts 's:g:n:r:u:l:' opt; do
flags_found=true
case $opt in
s)
runner_scope=$OPTARG
;;
g)
ghe_hostname=$OPTARG
;;
n)
runner_name=$OPTARG
;;
r)
runner_group=$OPTARG
;;
u)
svc_user=$OPTARG
;;
l)
labels=$OPTARG
;;
*)
echo "
Runner Service Installer
Examples:
RUNNER_CFG_PAT=<yourPAT> ./create-latest-svc.sh myuser/myrepo my.ghe.deployment.net
RUNNER_CFG_PAT=<yourPAT> ./create-latest-svc.sh -s myorg -u user_name -l label1,label2
Usage:
export RUNNER_CFG_PAT=<yourPAT>
./create-latest-svc scope [ghe_domain] [name] [user] [labels]
-s required scope: repo (:owner/:repo) or org (:organization)
-g optional ghe_hostname: the fully qualified domain name of your GitHub Enterprise Server deployment
-n optional name of the runner, defaults to hostname
-r optional name of the runner group to add the runner to, defaults to the Default group
-u optional user svc will run as, defaults to current
-l optional list of labels (split by comma) applied on the runner"
exit 0
;;
esac
done
shift "$((OPTIND - 1))"
if ! "$flags_found"; then
runner_scope=${1}
ghe_hostname=${2}
runner_name=${3:-$(hostname)}
svc_user=${4:-$USER}
labels=${5}
runner_group=${6}
fi
# apply defaults
runner_name=${runner_name:-$(hostname)}
svc_user=${svc_user:-$USER}
echo "Configuring runner @ ${runner_scope}"
sudo echo
#---------------------------------------
# Validate Environment
#---------------------------------------
runner_plat=linux
[ ! -z "$(which sw_vers)" ] && runner_plat=osx;
function fatal()
{
echo "error: $1" >&2
exit 1
}
if [ -z "${runner_scope}" ]; then fatal "supply scope as argument 1"; fi
if [ -z "${RUNNER_CFG_PAT}" ]; then fatal "RUNNER_CFG_PAT must be set before calling"; fi
which curl || fatal "curl required. Please install in PATH with apt-get, brew, etc"
which jq || fatal "jq required. Please install in PATH with apt-get, brew, etc"
# bail early if there's already a runner there. also sudo early
if [ -d ./runner ]; then
fatal "Runner already exists. Use a different directory or delete ./runner"
fi
sudo -u ${svc_user} mkdir runner
# TODO: validate not in a container
# TODO: validate systemd or osx svc installer
#--------------------------------------
# Get a config token
#--------------------------------------
echo
echo "Generating a registration token..."
base_api_url="https://api.github.com"
if [ -n "${ghe_hostname}" ]; then
base_api_url="https://${ghe_hostname}/api/v3"
fi
# if the scope has a slash, it's a repo runner
orgs_or_repos="orgs"
if [[ "$runner_scope" == *\/* ]]; then
orgs_or_repos="repos"
fi
export RUNNER_TOKEN=$(curl -s -X POST ${base_api_url}/${orgs_or_repos}/${runner_scope}/actions/runners/registration-token -H "accept: application/vnd.github.everest-preview+json" -H "authorization: token ${RUNNER_CFG_PAT}" | jq -r '.token')
if [ "null" == "$RUNNER_TOKEN" -o -z "$RUNNER_TOKEN" ]; then fatal "Failed to get a token"; fi
#---------------------------------------
# Download latest released and extract
#---------------------------------------
echo
echo "Downloading latest runner ..."
# For the GHES Alpha, download the runner from github.com
latest_version_label=$(curl -s -X GET 'https://api.github.com/repos/actions/runner/releases/latest' | jq -r '.tag_name')
latest_version=$(echo ${latest_version_label:1})
runner_file="actions-runner-${runner_plat}-x64-${latest_version}.tar.gz"
if [ -f "${runner_file}" ]; then
echo "${runner_file} exists. skipping download."
else
runner_url="https://github.com/actions/runner/releases/download/${latest_version_label}/${runner_file}"
echo "Downloading ${latest_version_label} for ${runner_plat} ..."
echo $runner_url
curl -O -L ${runner_url}
fi
ls -la *.tar.gz
#---------------------------------------------------
# extract to runner directory in this directory
#---------------------------------------------------
echo
echo "Extracting ${runner_file} to ./runner"
tar xzf "./${runner_file}" -C runner
# export of pass
sudo chown -R $svc_user ./runner
pushd ./runner
#---------------------------------------
# Unattend config
#---------------------------------------
runner_url="https://github.com/${runner_scope}"
if [ -n "${ghe_hostname}" ]; then
runner_url="https://${ghe_hostname}/${runner_scope}"
fi
echo
echo "Configuring ${runner_name} @ $runner_url"
echo "./config.sh --unattended --url $runner_url --token *** --name $runner_name ${labels:+--labels $labels} ${runner_group:+--runnergroup \"$runner_group\"}"
sudo -E -u ${svc_user} ./config.sh --unattended --url $runner_url --token $RUNNER_TOKEN --name $runner_name ${labels:+--labels $labels} ${runner_group:+--runnergroup "$runner_group"}
#---------------------------------------
# Configuring as a service
#---------------------------------------
echo
echo "Configuring as a service ..."
prefix=""
if [ "${runner_plat}" == "linux" ]; then
prefix="sudo "
fi
${prefix}./svc.sh install ${svc_user}
${prefix}./svc.sh start

scripts/delete.sh Executable file

@@ -0,0 +1,83 @@
#!/bin/bash
set -e
#
# Force deletes a runner from the service
# The caller should have already ensured the runner is gone and/or stopped
#
# Examples:
# RUNNER_CFG_PAT=<yourPAT> ./delete.sh myuser/myrepo myname
# RUNNER_CFG_PAT=<yourPAT> ./delete.sh myorg
#
# Usage:
# export RUNNER_CFG_PAT=<yourPAT>
# ./delete.sh scope name
#
# scope required repo (:owner/:repo) or org (:organization)
# name optional defaults to hostname. name to delete
#
# Notes:
# PATS over envvars are more secure
# Works on OSX and Linux
# Assumes x64 arch
#
runner_scope=${1}
runner_name=${2}
echo "Deleting runner ${runner_name} @ ${runner_scope}"
function fatal()
{
echo "error: $1" >&2
exit 1
}
if [ -z "${runner_scope}" ]; then fatal "supply scope as argument 1"; fi
if [ -z "${runner_name}" ]; then fatal "supply name as argument 2"; fi
if [ -z "${RUNNER_CFG_PAT}" ]; then fatal "RUNNER_CFG_PAT must be set before calling"; fi
which curl || fatal "curl required. Please install in PATH with apt-get, brew, etc"
which jq || fatal "jq required. Please install in PATH with apt-get, brew, etc"
base_api_url="https://api.github.com/orgs"
if [[ "$runner_scope" == *\/* ]]; then
base_api_url="https://api.github.com/repos"
fi
#--------------------------------------
# Ensure offline
#--------------------------------------
runner_status=$(curl -s -X GET ${base_api_url}/${runner_scope}/actions/runners?per_page=100 -H "accept: application/vnd.github.everest-preview+json" -H "authorization: token ${RUNNER_CFG_PAT}" \
| jq -M -j ".runners | .[] | select(.name == \"${runner_name}\") | .status")
if [ -z "${runner_status}" ]; then
fatal "Could not find runner with name ${runner_name}"
fi
echo "Status: ${runner_status}"
if [ "${runner_status}" != "offline" ]; then
fatal "Runner should be offline before removing"
fi
#--------------------------------------
# Get id of runner to remove
#--------------------------------------
runner_id=$(curl -s -X GET ${base_api_url}/${runner_scope}/actions/runners?per_page=100 -H "accept: application/vnd.github.everest-preview+json" -H "authorization: token ${RUNNER_CFG_PAT}" \
| jq -M -j ".runners | .[] | select(.name == \"${runner_name}\") | .id")
if [ -z "${runner_id}" ]; then
fatal "Could not find runner with name ${runner_name}"
fi
echo "Removing id ${runner_id}"
#--------------------------------------
# Remove the runner
#--------------------------------------
curl -s -X DELETE ${base_api_url}/${runner_scope}/actions/runners/${runner_id} -H "authorization: token ${RUNNER_CFG_PAT}"
echo "Done."

scripts/remove-svc.sh Executable file

@@ -0,0 +1,76 @@
#!/bin/bash
set -e
#
# Removes a runner running as a service
# Must be run on the machine where the service is run
#
# Examples:
# RUNNER_CFG_PAT=<yourPAT> ./remove-svc.sh myuser/myrepo
# RUNNER_CFG_PAT=<yourPAT> ./remove-svc.sh myorg
#
# Usage:
# export RUNNER_CFG_PAT=<yourPAT>
# ./remove-svc scope name
#
# scope required repo (:owner/:repo) or org (:organization)
# name optional defaults to hostname. name to uninstall and remove
#
# Notes:
# PATS over envvars are more secure
# Should be used on VMs and not containers
# Works on OSX and Linux
# Assumes x64 arch
#
runner_scope=${1}
runner_name=${2:-$(hostname)}
echo "Uninstalling runner ${runner_name} @ ${runner_scope}"
sudo echo
function fatal()
{
echo "error: $1" >&2
exit 1
}
if [ -z "${runner_scope}" ]; then fatal "supply scope as argument 1"; fi
if [ -z "${RUNNER_CFG_PAT}" ]; then fatal "RUNNER_CFG_PAT must be set before calling"; fi
which curl || fatal "curl required. Please install in PATH with apt-get, brew, etc"
which jq || fatal "jq required. Please install in PATH with apt-get, brew, etc"
runner_plat=linux
[ ! -z "$(which sw_vers)" ] && runner_plat=osx;
#--------------------------------------
# Get a remove token
#--------------------------------------
echo
echo "Generating a removal token..."
# if the scope has a slash, it's a repo runner
base_api_url="https://api.github.com/orgs"
if [[ "$runner_scope" == *\/* ]]; then
base_api_url="https://api.github.com/repos"
fi
export REMOVE_TOKEN=$(curl -s -X POST ${base_api_url}/${runner_scope}/actions/runners/remove-token -H "accept: application/vnd.github.everest-preview+json" -H "authorization: token ${RUNNER_CFG_PAT}" | jq -r '.token')
if [ -z "$REMOVE_TOKEN" ]; then fatal "Failed to get a token"; fi
#---------------------------------------
# Stop and uninstall the service
#---------------------------------------
echo
echo "Uninstall the service ..."
pushd ./runner
prefix=""
if [ "${runner_plat}" == "linux" ]; then
prefix="sudo "
fi
${prefix}./svc.sh stop
${prefix}./svc.sh uninstall
./config.sh remove --token $REMOVE_TOKEN

src/.editorconfig Normal file

@@ -0,0 +1,10 @@
[*.cs]
charset = utf-8
insert_final_newline = true
csharp_new_line_before_else = true
csharp_new_line_before_catch = true
csharp_new_line_before_finally = true
csharp_new_line_before_open_brace = all
csharp_space_after_keywords_in_control_flow_statements = true


@@ -1,4 +1,4 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 16
VisualStudioVersion = 16.0.29411.138
@@ -21,6 +21,11 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Sdk", "Sdk\Sdk.csproj", "{D
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Test", "Test\Test.csproj", "{C932061F-F6A1-4F1E-B854-A6C6B30DC3EF}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{EFB254FC-7927-445E-BA64-6676ADB309E9}"
ProjectSection(SolutionItems) = preProject
.editorconfig = .editorconfig
EndProjectSection
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU


@@ -25,9 +25,12 @@
<DefineConstants>$(DefineConstants);X86</DefineConstants>
</PropertyGroup>
<PropertyGroup Condition="'$(BUILD_OS)' == 'OSX'">
<PropertyGroup Condition="'$(BUILD_OS)' == 'OSX' AND '$(PackageRuntime)' == 'osx-x64'">
<DefineConstants>$(DefineConstants);X64</DefineConstants>
</PropertyGroup>
<PropertyGroup Condition="'$(BUILD_OS)' == 'OSX' AND '$(PackageRuntime)' == 'osx-arm64'">
<DefineConstants>$(DefineConstants);ARM64</DefineConstants>
</PropertyGroup>
<PropertyGroup Condition="'$(BUILD_OS)' == 'Linux' AND ('$(PackageRuntime)' == 'linux-x64' OR '$(PackageRuntime)' == '')">
<DefineConstants>$(DefineConstants);X64</DefineConstants>


@@ -0,0 +1 @@
1d709d93e5d3c6c6c656a61aa6c1781050224788a05b0e6ecc4c3c0408bdf89c


@@ -0,0 +1 @@
b92a47cfeaad02255b1f7a377060651b73ae5e5db22a188dbbcb4183ab03a03d


@@ -0,0 +1 @@
68a9a8ef0843a8bb74241894f6f63fd76241a82295c5337d3cc7a940a314c78e


@@ -0,0 +1 @@
02c7126ff4d63ee2a0ae390c81434c125630522aadf35903bbeebb1a99d8af99


@@ -0,0 +1 @@
c9d5a542f8d765168855a89e83ae0a8970d00869041c4f9a766651c04c72b212


@@ -0,0 +1 @@
d94f2fbaf210297162bc9f3add819d73682c3aa6899e321c3872412b924d5504


@@ -0,0 +1 @@
6ed30a2c1ee403a610d63e82bb230b9ba846a9c25cec9e4ea8672fb6ed4e1a51


@@ -0,0 +1 @@
711c30c51ec52c9b7a9a2eb399d6ab2ab5ee1dc72de11879f2f36f919f163d78


@@ -0,0 +1 @@
a49479ca4b4988a06c097e8d22c51fd08a11c13f40807366236213d0e008cf6a


@@ -0,0 +1 @@
cc4708962a80325de0baa5ae8484e0cb9ae976ac6a4178c1c0d448b8c52bd7f7


@@ -0,0 +1 @@
8e97df75230b843462a9b4c578ccec604ee4b4a1066120c85b04374317fa372b


@@ -0,0 +1 @@
f75a671e5a188c76680739689aa75331a2c09d483dce9c80023518c48fd67a18


@@ -23,8 +23,6 @@
Default: latest
Represents a build version on specific channel. Possible values:
- latest - most latest build on specific channel
- coherent - most latest coherent build on specific channel
coherent applies only to SDK downloads
- 3-part version in a format A.B.C - represents specific version of build
examples: 2.0.0-preview2-006120, 1.1.0
.PARAMETER InstallDir
@@ -69,6 +67,8 @@
.PARAMETER ProxyUseDefaultCredentials
Default: false
Use default credentials, when using proxy address.
.PARAMETER ProxyBypassList
If set with ProxyAddress, will provide the list of comma separated urls that will bypass the proxy
.PARAMETER SkipNonVersionedFiles
Default: false
Skips installing non-versioned files if they already exist, such as dotnet.exe.
@@ -96,6 +96,7 @@ param(
[string]$FeedCredential,
[string]$ProxyAddress,
[switch]$ProxyUseDefaultCredentials,
[string[]]$ProxyBypassList=@(),
[switch]$SkipNonVersionedFiles,
[switch]$NoCdn
)
@@ -119,11 +120,45 @@ $VersionRegEx="/\d+\.\d+[^/]+/"
$OverrideNonVersionedFiles = !$SkipNonVersionedFiles
function Say($str) {
try {
Write-Host "dotnet-install: $str"
}
catch {
# Some platforms cannot utilize Write-Host (Azure Functions, for instance). Fall back to Write-Output
Write-Output "dotnet-install: $str"
}
}
function Say-Warning($str) {
try {
Write-Warning "dotnet-install: $str"
}
catch {
# Some platforms cannot utilize Write-Warning (Azure Functions, for instance). Fall back to Write-Output
Write-Output "dotnet-install: Warning: $str"
}
}
# Writes a line with error style settings.
# Use this function to show a human-readable comment along with an exception.
function Say-Error($str) {
try {
# Write-Error is quite oververbose for the purpose of the function, let's write one line with error style settings.
$Host.UI.WriteErrorLine("dotnet-install: $str")
}
catch {
Write-Output "dotnet-install: Error: $str"
}
}
function Say-Verbose($str) {
try {
Write-Verbose "dotnet-install: $str"
}
catch {
# Some platforms cannot utilize Write-Verbose (Azure Functions, for instance). Fall back to Write-Output
Write-Output "dotnet-install: $str"
}
}
function Say-Invocation($Invocation) {
@@ -137,7 +172,7 @@ function Invoke-With-Retry([ScriptBlock]$ScriptBlock, [int]$MaxAttempts = 3, [in
while ($true) {
try {
return $ScriptBlock.Invoke()
return & $ScriptBlock
}
catch {
$Attempts++
@@ -154,7 +189,16 @@ function Invoke-With-Retry([ScriptBlock]$ScriptBlock, [int]$MaxAttempts = 3, [in
function Get-Machine-Architecture() {
Say-Invocation $MyInvocation
# possible values: amd64, x64, x86, arm64, arm
# On PS x86, PROCESSOR_ARCHITECTURE reports x86 even on x64 systems.
# To get the correct architecture, we need to use PROCESSOR_ARCHITEW6432.
# PS x64 doesn't define this, so we fall back to PROCESSOR_ARCHITECTURE.
# Possible values: amd64, x64, x86, arm64, arm
if( $ENV:PROCESSOR_ARCHITEW6432 -ne $null )
{
return $ENV:PROCESSOR_ARCHITEW6432
}
return $ENV:PROCESSOR_ARCHITECTURE
}
@@ -167,7 +211,7 @@ function Get-CLIArchitecture-From-Architecture([string]$Architecture) {
{ $_ -eq "x86" } { return "x86" }
{ $_ -eq "arm" } { return "arm" }
{ $_ -eq "arm64" } { return "arm64" }
default { throw "Architecture not supported. If you think this is a bug, report it at https://github.com/dotnet/sdk/issues" }
default { throw "Architecture '$Architecture' not supported. If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues" }
}
}
@@ -228,7 +272,11 @@ function GetHTTPResponse([Uri] $Uri)
if($ProxyAddress) {
$HttpClientHandler = New-Object System.Net.Http.HttpClientHandler
$HttpClientHandler.Proxy = New-Object System.Net.WebProxy -Property @{Address=$ProxyAddress;UseDefaultCredentials=$ProxyUseDefaultCredentials}
$HttpClientHandler.Proxy = New-Object System.Net.WebProxy -Property @{
Address=$ProxyAddress;
UseDefaultCredentials=$ProxyUseDefaultCredentials;
BypassList = $ProxyBypassList;
}
$HttpClient = New-Object System.Net.Http.HttpClient -ArgumentList $HttpClientHandler
}
else {
@@ -238,19 +286,42 @@ function GetHTTPResponse([Uri] $Uri)
# Default timeout for HttpClient is 100s. For a 50 MB download this assumes 500 KB/s average, any less will time out
# 20 minutes allows it to work over much slower connections.
$HttpClient.Timeout = New-TimeSpan -Minutes 20
$Response = $HttpClient.GetAsync("${Uri}${FeedCredential}").Result
if (($Response -eq $null) -or (-not ($Response.IsSuccessStatusCode))) {
$Task = $HttpClient.GetAsync("${Uri}${FeedCredential}").ConfigureAwait("false");
$Response = $Task.GetAwaiter().GetResult();
if (($null -eq $Response) -or (-not ($Response.IsSuccessStatusCode))) {
# The feed credential is potentially sensitive info. Do not log FeedCredential to console output.
$ErrorMsg = "Failed to download $Uri."
if ($Response -ne $null) {
$ErrorMsg += " $Response"
$DownloadException = [System.Exception] "Unable to download $Uri."
if ($null -ne $Response) {
$DownloadException.Data["StatusCode"] = [int] $Response.StatusCode
$DownloadException.Data["ErrorMessage"] = "Unable to download $Uri. Returned HTTP status code: " + $DownloadException.Data["StatusCode"]
}
throw $ErrorMsg
throw $DownloadException
}
return $Response
}
catch [System.Net.Http.HttpRequestException] {
$DownloadException = [System.Exception] "Unable to download $Uri."
# Pick up the exception message and inner exceptions' messages if they exist
$CurrentException = $PSItem.Exception
$ErrorMsg = $CurrentException.Message + "`r`n"
while ($CurrentException.InnerException) {
$CurrentException = $CurrentException.InnerException
$ErrorMsg += $CurrentException.Message + "`r`n"
}
# Check if there is an issue concerning TLS.
if ($ErrorMsg -like "*SSL/TLS*") {
$ErrorMsg += "Ensure that TLS 1.2 or higher is enabled to use this script.`r`n"
}
$DownloadException.Data["ErrorMessage"] = $ErrorMsg
throw $DownloadException
}
finally {
if ($HttpClient -ne $null) {
$HttpClient.Dispose()
@@ -259,7 +330,7 @@ function GetHTTPResponse([Uri] $Uri)
})
}
function Get-Latest-Version-Info([string]$AzureFeed, [string]$Channel, [bool]$Coherent) {
function Get-Latest-Version-Info([string]$AzureFeed, [string]$Channel) {
Say-Invocation $MyInvocation
$VersionFileUrl = $null
@@ -269,18 +340,12 @@ function Get-Latest-Version-Info([string]$AzureFeed, [string]$Channel, [bool]$Co
elseif ($Runtime -eq "aspnetcore") {
$VersionFileUrl = "$UncachedFeed/aspnetcore/Runtime/$Channel/latest.version"
}
# Currently, the WindowsDesktop runtime is manufactured with the .Net core runtime
elseif ($Runtime -eq "windowsdesktop") {
$VersionFileUrl = "$UncachedFeed/Runtime/$Channel/latest.version"
$VersionFileUrl = "$UncachedFeed/WindowsDesktop/$Channel/latest.version"
}
elseif (-not $Runtime) {
if ($Coherent) {
$VersionFileUrl = "$UncachedFeed/Sdk/$Channel/latest.coherent.version"
}
else {
$VersionFileUrl = "$UncachedFeed/Sdk/$Channel/latest.version"
}
}
else {
throw "Invalid value for `$Runtime"
}
@@ -288,7 +353,8 @@ function Get-Latest-Version-Info([string]$AzureFeed, [string]$Channel, [bool]$Co
$Response = GetHTTPResponse -Uri $VersionFileUrl
}
catch {
throw "Could not resolve version information."
Say-Error "Could not resolve version information."
throw
}
$StringContent = $Response.Content.ReadAsStringAsync().Result
@@ -314,7 +380,8 @@ function Parse-Jsonfile-For-Version([string]$JSonFile) {
$JSonContent = Get-Content($JSonFile) -Raw | ConvertFrom-Json | Select-Object -expand "sdk" -ErrorAction SilentlyContinue
}
catch {
throw "Json file unreadable: '$JSonFile'"
Say-Error "Json file unreadable: '$JSonFile'"
throw
}
if ($JSonContent) {
try {
@@ -327,7 +394,8 @@ function Parse-Jsonfile-For-Version([string]$JSonFile) {
}
}
catch {
throw "Unable to parse the SDK node in '$JSonFile'"
Say-Error "Unable to parse the SDK node in '$JSonFile'"
throw
}
}
else {
@@ -343,16 +411,12 @@ function Get-Specific-Version-From-Version([string]$AzureFeed, [string]$Channel,
Say-Invocation $MyInvocation
if (-not $JSonFile) {
switch ($Version.ToLower()) {
{ $_ -eq "latest" } {
$LatestVersionInfo = Get-Latest-Version-Info -AzureFeed $AzureFeed -Channel $Channel -Coherent $False
if ($Version.ToLower() -eq "latest") {
$LatestVersionInfo = Get-Latest-Version-Info -AzureFeed $AzureFeed -Channel $Channel
return $LatestVersionInfo.Version
}
{ $_ -eq "coherent" } {
$LatestVersionInfo = Get-Latest-Version-Info -AzureFeed $AzureFeed -Channel $Channel -Coherent $True
return $LatestVersionInfo.Version
}
default { return $Version }
else {
return $Version
}
}
else {
@@ -363,17 +427,29 @@ function Get-Specific-Version-From-Version([string]$AzureFeed, [string]$Channel,
function Get-Download-Link([string]$AzureFeed, [string]$SpecificVersion, [string]$CLIArchitecture) {
Say-Invocation $MyInvocation
# If anything fails in this lookup it will default to $SpecificVersion
$SpecificProductVersion = Get-Product-Version -AzureFeed $AzureFeed -SpecificVersion $SpecificVersion
if ($Runtime -eq "dotnet") {
$PayloadURL = "$AzureFeed/Runtime/$SpecificVersion/dotnet-runtime-$SpecificVersion-win-$CLIArchitecture.zip"
$PayloadURL = "$AzureFeed/Runtime/$SpecificVersion/dotnet-runtime-$SpecificProductVersion-win-$CLIArchitecture.zip"
}
elseif ($Runtime -eq "aspnetcore") {
$PayloadURL = "$AzureFeed/aspnetcore/Runtime/$SpecificVersion/aspnetcore-runtime-$SpecificVersion-win-$CLIArchitecture.zip"
$PayloadURL = "$AzureFeed/aspnetcore/Runtime/$SpecificVersion/aspnetcore-runtime-$SpecificProductVersion-win-$CLIArchitecture.zip"
}
elseif ($Runtime -eq "windowsdesktop") {
$PayloadURL = "$AzureFeed/Runtime/$SpecificVersion/windowsdesktop-runtime-$SpecificVersion-win-$CLIArchitecture.zip"
# The windows desktop runtime is part of the core runtime layout prior to 5.0
$PayloadURL = "$AzureFeed/Runtime/$SpecificVersion/windowsdesktop-runtime-$SpecificProductVersion-win-$CLIArchitecture.zip"
if ($SpecificVersion -match '^(\d+)\.(.*)$')
{
$majorVersion = [int]$Matches[1]
if ($majorVersion -ge 5)
{
$PayloadURL = "$AzureFeed/WindowsDesktop/$SpecificVersion/windowsdesktop-runtime-$SpecificProductVersion-win-$CLIArchitecture.zip"
}
}
}
elseif (-not $Runtime) {
$PayloadURL = "$AzureFeed/Sdk/$SpecificVersion/dotnet-sdk-$SpecificVersion-win-$CLIArchitecture.zip"
$PayloadURL = "$AzureFeed/Sdk/$SpecificVersion/dotnet-sdk-$SpecificProductVersion-win-$CLIArchitecture.zip"
}
else {
throw "Invalid value for `$Runtime"
@@ -381,7 +457,7 @@ function Get-Download-Link([string]$AzureFeed, [string]$SpecificVersion, [string
Say-Verbose "Constructed primary named payload URL: $PayloadURL"
return $PayloadURL
return $PayloadURL, $SpecificProductVersion
}
function Get-LegacyDownload-Link([string]$AzureFeed, [string]$SpecificVersion, [string]$CLIArchitecture) {
@@ -402,6 +478,60 @@ function Get-LegacyDownload-Link([string]$AzureFeed, [string]$SpecificVersion, [
return $PayloadURL
}
function Get-Product-Version([string]$AzureFeed, [string]$SpecificVersion) {
Say-Invocation $MyInvocation
if ($Runtime -eq "dotnet") {
$ProductVersionTxtURL = "$AzureFeed/Runtime/$SpecificVersion/productVersion.txt"
}
elseif ($Runtime -eq "aspnetcore") {
$ProductVersionTxtURL = "$AzureFeed/aspnetcore/Runtime/$SpecificVersion/productVersion.txt"
}
elseif ($Runtime -eq "windowsdesktop") {
# The windows desktop runtime is part of the core runtime layout prior to 5.0
$ProductVersionTxtURL = "$AzureFeed/Runtime/$SpecificVersion/productVersion.txt"
if ($SpecificVersion -match '^(\d+)\.(.*)')
{
$majorVersion = [int]$Matches[1]
if ($majorVersion -ge 5)
{
$ProductVersionTxtURL = "$AzureFeed/WindowsDesktop/$SpecificVersion/productVersion.txt"
}
}
}
elseif (-not $Runtime) {
$ProductVersionTxtURL = "$AzureFeed/Sdk/$SpecificVersion/productVersion.txt"
}
else {
throw "Invalid value '$Runtime' specified for `$Runtime"
}
Say-Verbose "Checking for existence of $ProductVersionTxtURL"
try {
$productVersionResponse = GetHTTPResponse($productVersionTxtUrl)
if ($productVersionResponse.StatusCode -eq 200) {
$productVersion = $productVersionResponse.Content.ReadAsStringAsync().Result.Trim()
if ($productVersion -ne $SpecificVersion)
{
Say "Using alternate version $productVersion found in $ProductVersionTxtURL"
}
return $productVersion
}
else {
Say-Verbose "Got StatusCode $($productVersionResponse.StatusCode) trying to get productVersion.txt at $productVersionTxtUrl, so using default value of $SpecificVersion"
$productVersion = $SpecificVersion
}
} catch {
Say-Verbose "Could not read productVersion.txt at $productVersionTxtUrl, so using default value of $SpecificVersion (Exception: '$($_.Exception.Message)' )"
$productVersion = $SpecificVersion
}
return $productVersion
}
function Get-User-Share-Path() {
Say-Invocation $MyInvocation
@@ -539,6 +669,23 @@ function DownloadFile($Source, [string]$OutPath) {
}
}
function SafeRemoveFile($Path) {
try {
if (Test-Path $Path) {
Remove-Item $Path
Say-Verbose "The temporary file `"$Path`" was removed."
}
else
{
Say-Verbose "The temporary file `"$Path`" does not exist, therefore is not removed."
}
}
catch
{
Say-Warning "Failed to remove the temporary file: `"$Path`", remove it manually."
}
}
function Prepend-Sdk-InstallRoot-To-Path([string]$InstallRoot, [string]$BinFolderRelativePath) {
$BinPath = Get-Absolute-Path $(Join-Path -Path $InstallRoot -ChildPath $BinFolderRelativePath)
if (-Not $NoPath) {
@@ -555,9 +702,14 @@ function Prepend-Sdk-InstallRoot-To-Path([string]$InstallRoot, [string]$BinFolde
}
}
Say "Note that the intended use of this script is for Continuous Integration (CI) scenarios, where:"
Say "- The SDK needs to be installed without user interaction and without admin rights."
Say "- The SDK installation doesn't need to persist across multiple CI runs."
Say "To set up a development environment or to run apps, use installers rather than this script. Visit https://dotnet.microsoft.com/download to get the installer.`r`n"
$CLIArchitecture = Get-CLIArchitecture-From-Architecture $Architecture
$SpecificVersion = Get-Specific-Version-From-Version -AzureFeed $AzureFeed -Channel $Channel -Version $Version -JSonFile $JSonFile
$DownloadLink = Get-Download-Link -AzureFeed $AzureFeed -SpecificVersion $SpecificVersion -CLIArchitecture $CLIArchitecture
$DownloadLink, $EffectiveVersion = Get-Download-Link -AzureFeed $AzureFeed -SpecificVersion $SpecificVersion -CLIArchitecture $CLIArchitecture
$LegacyDownloadLink = Get-LegacyDownload-Link -AzureFeed $AzureFeed -SpecificVersion $SpecificVersion -CLIArchitecture $CLIArchitecture
$InstallRoot = Resolve-Installation-Path $InstallDir
@@ -583,7 +735,12 @@ if ($DryRun) {
}
}
Say "Repeatable invocation: $RepeatableCommand"
exit 0
if ($SpecificVersion -ne $EffectiveVersion)
{
Say "NOTE: Due to finding a version manifest with this runtime, it would actually install with version '$EffectiveVersion'"
}
return
}
if ($Runtime -eq "dotnet") {
@@ -606,12 +763,18 @@ else {
throw "Invalid value for `$Runtime"
}
if ($SpecificVersion -ne $EffectiveVersion)
{
Say "Performing installation checks for effective version: $EffectiveVersion"
$SpecificVersion = $EffectiveVersion
}
# Check if the SDK version is already installed.
$isAssetInstalled = Is-Dotnet-Package-Installed -InstallRoot $InstallRoot -RelativePathToPackage $dotnetPackageRelativePath -SpecificVersion $SpecificVersion
if ($isAssetInstalled) {
Say "$assetName version $SpecificVersion is already installed."
Prepend-Sdk-InstallRoot-To-Path -InstallRoot $InstallRoot -BinFolderRelativePath $BinFolderRelativePath
exit 0
return
}
New-Item -ItemType Directory -Force -Path $InstallRoot | Out-Null
@@ -619,30 +782,69 @@ New-Item -ItemType Directory -Force -Path $InstallRoot | Out-Null
$installDrive = $((Get-Item $InstallRoot).PSDrive.Name);
$diskInfo = Get-PSDrive -Name $installDrive
if ($diskInfo.Free / 1MB -le 100) {
Say "There is not enough disk space on drive ${installDrive}:"
exit 0
throw "There is not enough disk space on drive ${installDrive}:"
}
$ZipPath = [System.IO.Path]::combine([System.IO.Path]::GetTempPath(), [System.IO.Path]::GetRandomFileName())
Say-Verbose "Zip path: $ZipPath"
$DownloadFailed = $false
Say "Downloading link: $DownloadLink"
$PrimaryDownloadStatusCode = 0
$LegacyDownloadStatusCode = 0
$PrimaryDownloadFailedMsg = ""
$LegacyDownloadFailedMsg = ""
Say "Downloading primary link $DownloadLink"
try {
DownloadFile -Source $DownloadLink -OutPath $ZipPath
}
catch {
Say "Cannot download: $DownloadLink"
if ($PSItem.Exception.Data.Contains("StatusCode")) {
$PrimaryDownloadStatusCode = $PSItem.Exception.Data["StatusCode"]
}
if ($PSItem.Exception.Data.Contains("ErrorMessage")) {
$PrimaryDownloadFailedMsg = $PSItem.Exception.Data["ErrorMessage"]
} else {
$PrimaryDownloadFailedMsg = $PSItem.Exception.Message
}
if ($PrimaryDownloadStatusCode -eq 404) {
Say "The resource at $DownloadLink is not available."
} else {
Say $PSItem.Exception.Message
}
SafeRemoveFile -Path $ZipPath
if ($LegacyDownloadLink) {
$DownloadLink = $LegacyDownloadLink
$ZipPath = [System.IO.Path]::combine([System.IO.Path]::GetTempPath(), [System.IO.Path]::GetRandomFileName())
Say-Verbose "Legacy zip path: $ZipPath"
Say "Downloading legacy link: $DownloadLink"
Say "Downloading legacy link $DownloadLink"
try {
DownloadFile -Source $DownloadLink -OutPath $ZipPath
}
catch {
Say "Cannot download: $DownloadLink"
if ($PSItem.Exception.Data.Contains("StatusCode")) {
$LegacyDownloadStatusCode = $PSItem.Exception.Data["StatusCode"]
}
if ($PSItem.Exception.Data.Contains("ErrorMessage")) {
$LegacyDownloadFailedMsg = $PSItem.Exception.Data["ErrorMessage"]
} else {
$LegacyDownloadFailedMsg = $PSItem.Exception.Message
}
if ($LegacyDownloadStatusCode -eq 404) {
Say "The resource at $DownloadLink is not available."
} else {
Say $PSItem.Exception.Message
}
SafeRemoveFile -Path $ZipPath
$DownloadFailed = $true
}
}
@@ -652,7 +854,19 @@ catch {
}
if ($DownloadFailed) {
throw "Could not find/download: `"$assetName`" with version = $SpecificVersion`nRefer to: https://aka.ms/dotnet-os-lifecycle for information on .NET Core support"
if (($PrimaryDownloadStatusCode -eq 404) -and ((-not $LegacyDownloadLink) -or ($LegacyDownloadStatusCode -eq 404))) {
throw "Could not find `"$assetName`" with version = $SpecificVersion`nRefer to: https://aka.ms/dotnet-os-lifecycle for information on .NET Core support"
} else {
# 404-NotFound is an expected response if it comes from only one of the links; do not show that error.
# If the primary path is available (not 404-NotFound), show the primary error; otherwise show the legacy error.
if ($PrimaryDownloadStatusCode -ne 404) {
throw "Could not download `"$assetName`" with version = $SpecificVersion`r`n$PrimaryDownloadFailedMsg"
}
if (($LegacyDownloadLink) -and ($LegacyDownloadStatusCode -ne 404)) {
throw "Could not download `"$assetName`" with version = $SpecificVersion`r`n$LegacyDownloadFailedMsg"
}
throw "Could not download `"$assetName`" with version = $SpecificVersion"
}
}
Say "Extracting zip from $DownloadLink"
@@ -674,13 +888,208 @@ if (!$isAssetInstalled) {
$isAssetInstalled = Is-Dotnet-Package-Installed -InstallRoot $InstallRoot -RelativePathToPackage $dotnetPackageRelativePath -SpecificVersion $SpecificVersion
}
# Version verification failed. More likely something is wrong either with the downloaded content or with the verification algorithm.
if (!$isAssetInstalled) {
Say-Error "Failed to verify the version of installed `"$assetName`".`nInstallation source: $DownloadLink.`nInstallation location: $InstallRoot.`nReport the bug at https://github.com/dotnet/install-scripts/issues."
throw "`"$assetName`" with version = $SpecificVersion failed to install with an unknown error."
}
Remove-Item $ZipPath
SafeRemoveFile -Path $ZipPath
Prepend-Sdk-InstallRoot-To-Path -InstallRoot $InstallRoot -BinFolderRelativePath $BinFolderRelativePath
Say "Note that the script does not resolve dependencies during installation."
Say "To check the list of dependencies, go to https://docs.microsoft.com/dotnet/core/install/windows#dependencies"
Say "Installation finished"
exit 0
# SIG # Begin signature block
# MIIjjwYJKoZIhvcNAQcCoIIjgDCCI3wCAQExDzANBglghkgBZQMEAgEFADB5Bgor
# BgEEAYI3AgEEoGswaTA0BgorBgEEAYI3AgEeMCYCAwEAAAQQH8w7YFlLCE63JNLG
# KX7zUQIBAAIBAAIBAAIBAAIBADAxMA0GCWCGSAFlAwQCAQUABCCNsnhcJvx/hXmM
# w8KjuvvIMDBFonhg9XJFc1QwfTyH4aCCDYEwggX/MIID56ADAgECAhMzAAABh3IX
# chVZQMcJAAAAAAGHMA0GCSqGSIb3DQEBCwUAMH4xCzAJBgNVBAYTAlVTMRMwEQYD
# VQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNy
# b3NvZnQgQ29ycG9yYXRpb24xKDAmBgNVBAMTH01pY3Jvc29mdCBDb2RlIFNpZ25p
# bmcgUENBIDIwMTEwHhcNMjAwMzA0MTgzOTQ3WhcNMjEwMzAzMTgzOTQ3WjB0MQsw
# CQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9u
# ZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMR4wHAYDVQQDExVNaWNy
# b3NvZnQgQ29ycG9yYXRpb24wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
# AQDOt8kLc7P3T7MKIhouYHewMFmnq8Ayu7FOhZCQabVwBp2VS4WyB2Qe4TQBT8aB
# znANDEPjHKNdPT8Xz5cNali6XHefS8i/WXtF0vSsP8NEv6mBHuA2p1fw2wB/F0dH
# sJ3GfZ5c0sPJjklsiYqPw59xJ54kM91IOgiO2OUzjNAljPibjCWfH7UzQ1TPHc4d
# weils8GEIrbBRb7IWwiObL12jWT4Yh71NQgvJ9Fn6+UhD9x2uk3dLj84vwt1NuFQ
# itKJxIV0fVsRNR3abQVOLqpDugbr0SzNL6o8xzOHL5OXiGGwg6ekiXA1/2XXY7yV
# Fc39tledDtZjSjNbex1zzwSXAgMBAAGjggF+MIIBejAfBgNVHSUEGDAWBgorBgEE
# AYI3TAgBBggrBgEFBQcDAzAdBgNVHQ4EFgQUhov4ZyO96axkJdMjpzu2zVXOJcsw
# UAYDVR0RBEkwR6RFMEMxKTAnBgNVBAsTIE1pY3Jvc29mdCBPcGVyYXRpb25zIFB1
# ZXJ0byBSaWNvMRYwFAYDVQQFEw0yMzAwMTIrNDU4Mzg1MB8GA1UdIwQYMBaAFEhu
# ZOVQBdOCqhc3NyK1bajKdQKVMFQGA1UdHwRNMEswSaBHoEWGQ2h0dHA6Ly93d3cu
# bWljcm9zb2Z0LmNvbS9wa2lvcHMvY3JsL01pY0NvZFNpZ1BDQTIwMTFfMjAxMS0w
# Ny0wOC5jcmwwYQYIKwYBBQUHAQEEVTBTMFEGCCsGAQUFBzAChkVodHRwOi8vd3d3
# Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NlcnRzL01pY0NvZFNpZ1BDQTIwMTFfMjAx
# MS0wNy0wOC5jcnQwDAYDVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAgEAixmy
# S6E6vprWD9KFNIB9G5zyMuIjZAOuUJ1EK/Vlg6Fb3ZHXjjUwATKIcXbFuFC6Wr4K
# NrU4DY/sBVqmab5AC/je3bpUpjtxpEyqUqtPc30wEg/rO9vmKmqKoLPT37svc2NV
# BmGNl+85qO4fV/w7Cx7J0Bbqk19KcRNdjt6eKoTnTPHBHlVHQIHZpMxacbFOAkJr
# qAVkYZdz7ikNXTxV+GRb36tC4ByMNxE2DF7vFdvaiZP0CVZ5ByJ2gAhXMdK9+usx
# zVk913qKde1OAuWdv+rndqkAIm8fUlRnr4saSCg7cIbUwCCf116wUJ7EuJDg0vHe
# yhnCeHnBbyH3RZkHEi2ofmfgnFISJZDdMAeVZGVOh20Jp50XBzqokpPzeZ6zc1/g
# yILNyiVgE+RPkjnUQshd1f1PMgn3tns2Cz7bJiVUaqEO3n9qRFgy5JuLae6UweGf
# AeOo3dgLZxikKzYs3hDMaEtJq8IP71cX7QXe6lnMmXU/Hdfz2p897Zd+kU+vZvKI
# 3cwLfuVQgK2RZ2z+Kc3K3dRPz2rXycK5XCuRZmvGab/WbrZiC7wJQapgBodltMI5
# GMdFrBg9IeF7/rP4EqVQXeKtevTlZXjpuNhhjuR+2DMt/dWufjXpiW91bo3aH6Ea
# jOALXmoxgltCp1K7hrS6gmsvj94cLRf50QQ4U8Qwggd6MIIFYqADAgECAgphDpDS
# AAAAAAADMA0GCSqGSIb3DQEBCwUAMIGIMQswCQYDVQQGEwJVUzETMBEGA1UECBMK
# V2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0
# IENvcnBvcmF0aW9uMTIwMAYDVQQDEylNaWNyb3NvZnQgUm9vdCBDZXJ0aWZpY2F0
# ZSBBdXRob3JpdHkgMjAxMTAeFw0xMTA3MDgyMDU5MDlaFw0yNjA3MDgyMTA5MDla
# MH4xCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdS
# ZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKDAmBgNVBAMT
# H01pY3Jvc29mdCBDb2RlIFNpZ25pbmcgUENBIDIwMTEwggIiMA0GCSqGSIb3DQEB
# AQUAA4ICDwAwggIKAoICAQCr8PpyEBwurdhuqoIQTTS68rZYIZ9CGypr6VpQqrgG
# OBoESbp/wwwe3TdrxhLYC/A4wpkGsMg51QEUMULTiQ15ZId+lGAkbK+eSZzpaF7S
# 35tTsgosw6/ZqSuuegmv15ZZymAaBelmdugyUiYSL+erCFDPs0S3XdjELgN1q2jz
# y23zOlyhFvRGuuA4ZKxuZDV4pqBjDy3TQJP4494HDdVceaVJKecNvqATd76UPe/7
# 4ytaEB9NViiienLgEjq3SV7Y7e1DkYPZe7J7hhvZPrGMXeiJT4Qa8qEvWeSQOy2u
# M1jFtz7+MtOzAz2xsq+SOH7SnYAs9U5WkSE1JcM5bmR/U7qcD60ZI4TL9LoDho33
# X/DQUr+MlIe8wCF0JV8YKLbMJyg4JZg5SjbPfLGSrhwjp6lm7GEfauEoSZ1fiOIl
# XdMhSz5SxLVXPyQD8NF6Wy/VI+NwXQ9RRnez+ADhvKwCgl/bwBWzvRvUVUvnOaEP
# 6SNJvBi4RHxF5MHDcnrgcuck379GmcXvwhxX24ON7E1JMKerjt/sW5+v/N2wZuLB
# l4F77dbtS+dJKacTKKanfWeA5opieF+yL4TXV5xcv3coKPHtbcMojyyPQDdPweGF
# RInECUzF1KVDL3SV9274eCBYLBNdYJWaPk8zhNqwiBfenk70lrC8RqBsmNLg1oiM
# CwIDAQABo4IB7TCCAekwEAYJKwYBBAGCNxUBBAMCAQAwHQYDVR0OBBYEFEhuZOVQ
# BdOCqhc3NyK1bajKdQKVMBkGCSsGAQQBgjcUAgQMHgoAUwB1AGIAQwBBMAsGA1Ud
# DwQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFHItOgIxkEO5FAVO
# 4eqnxzHRI4k0MFoGA1UdHwRTMFEwT6BNoEuGSWh0dHA6Ly9jcmwubWljcm9zb2Z0
# LmNvbS9wa2kvY3JsL3Byb2R1Y3RzL01pY1Jvb0NlckF1dDIwMTFfMjAxMV8wM18y
# Mi5jcmwwXgYIKwYBBQUHAQEEUjBQME4GCCsGAQUFBzAChkJodHRwOi8vd3d3Lm1p
# Y3Jvc29mdC5jb20vcGtpL2NlcnRzL01pY1Jvb0NlckF1dDIwMTFfMjAxMV8wM18y
# Mi5jcnQwgZ8GA1UdIASBlzCBlDCBkQYJKwYBBAGCNy4DMIGDMD8GCCsGAQUFBwIB
# FjNodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2RvY3MvcHJpbWFyeWNw
# cy5odG0wQAYIKwYBBQUHAgIwNB4yIB0ATABlAGcAYQBsAF8AcABvAGwAaQBjAHkA
# XwBzAHQAYQB0AGUAbQBlAG4AdAAuIB0wDQYJKoZIhvcNAQELBQADggIBAGfyhqWY
# 4FR5Gi7T2HRnIpsLlhHhY5KZQpZ90nkMkMFlXy4sPvjDctFtg/6+P+gKyju/R6mj
# 82nbY78iNaWXXWWEkH2LRlBV2AySfNIaSxzzPEKLUtCw/WvjPgcuKZvmPRul1LUd
# d5Q54ulkyUQ9eHoj8xN9ppB0g430yyYCRirCihC7pKkFDJvtaPpoLpWgKj8qa1hJ
# Yx8JaW5amJbkg/TAj/NGK978O9C9Ne9uJa7lryft0N3zDq+ZKJeYTQ49C/IIidYf
# wzIY4vDFLc5bnrRJOQrGCsLGra7lstnbFYhRRVg4MnEnGn+x9Cf43iw6IGmYslmJ
# aG5vp7d0w0AFBqYBKig+gj8TTWYLwLNN9eGPfxxvFX1Fp3blQCplo8NdUmKGwx1j
# NpeG39rz+PIWoZon4c2ll9DuXWNB41sHnIc+BncG0QaxdR8UvmFhtfDcxhsEvt9B
# xw4o7t5lL+yX9qFcltgA1qFGvVnzl6UJS0gQmYAf0AApxbGbpT9Fdx41xtKiop96
# eiL6SJUfq/tHI4D1nvi/a7dLl+LrdXga7Oo3mXkYS//WsyNodeav+vyL6wuA6mk7
# r/ww7QRMjt/fdW1jkT3RnVZOT7+AVyKheBEyIXrvQQqxP/uozKRdwaGIm1dxVk5I
# RcBCyZt2WwqASGv9eZ/BvW1taslScxMNelDNMYIVZDCCFWACAQEwgZUwfjELMAkG
# A1UEBhMCVVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQx
# HjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEoMCYGA1UEAxMfTWljcm9z
# b2Z0IENvZGUgU2lnbmluZyBQQ0EgMjAxMQITMwAAAYdyF3IVWUDHCQAAAAABhzAN
# BglghkgBZQMEAgEFAKCBrjAZBgkqhkiG9w0BCQMxDAYKKwYBBAGCNwIBBDAcBgor
# BgEEAYI3AgELMQ4wDAYKKwYBBAGCNwIBFTAvBgkqhkiG9w0BCQQxIgQgpT/bxWwe
# aW0EinKMWCAzDXUjwXkIHldYzR6lw4/1Pc0wQgYKKwYBBAGCNwIBDDE0MDKgFIAS
# AE0AaQBjAHIAbwBzAG8AZgB0oRqAGGh0dHA6Ly93d3cubWljcm9zb2Z0LmNvbTAN
# BgkqhkiG9w0BAQEFAASCAQCHd7sSQVq0YDg8QDx6/kLWn3s6jtvvIDCCgsO9spHM
# quPd4FPbG67DCsKDClekQs52qrtRO3Zo+JMnCw4j3bS+gZHzeJr2shbftOrpsFoD
# l7OPcUmtrqul9dkQCOp8t0MP3ls0n96/YyNy6lz4BAlTdkdDx957uAxalKaCIBzb
# R9QyppOKIfNFvwD4EI5KI6tpmSy/uH8SrRg7ZExAYZl6J6R18WkL7KHn649lPoAQ
# ujwrIXH10xOJops45ILGzKWQcHmCzLJGYapL4VHUuK+73nT+9ZROGHdk/PyvIcdw
# iERa+C06v305t3DA+CuHFy1tvyw7IFF6RVbLZPwxrJjToYIS7jCCEuoGCisGAQQB
# gjcDAwExghLaMIIS1gYJKoZIhvcNAQcCoIISxzCCEsMCAQMxDzANBglghkgBZQME
# AgEFADCCAVUGCyqGSIb3DQEJEAEEoIIBRASCAUAwggE8AgEBBgorBgEEAYRZCgMB
# MDEwDQYJYIZIAWUDBAIBBQAEIOCaTmvM1AP0WaEVqzKaaCu/R+bTlR4kCrM/ZXsb
# /eNOAgZgGeLsMwsYEzIwMjEwMjAzMjExNzQ5LjU5MVowBIACAfSggdSkgdEwgc4x
# CzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRt
# b25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKTAnBgNVBAsTIE1p
# Y3Jvc29mdCBPcGVyYXRpb25zIFB1ZXJ0byBSaWNvMSYwJAYDVQQLEx1UaGFsZXMg
# VFNTIEVTTjo4OTdBLUUzNTYtMTcwMTElMCMGA1UEAxMcTWljcm9zb2Z0IFRpbWUt
# U3RhbXAgU2VydmljZaCCDkEwggT1MIID3aADAgECAhMzAAABLCKvRZd1+RvuAAAA
# AAEsMA0GCSqGSIb3DQEBCwUAMHwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNo
# aW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29y
# cG9yYXRpb24xJjAkBgNVBAMTHU1pY3Jvc29mdCBUaW1lLVN0YW1wIFBDQSAyMDEw
# MB4XDTE5MTIxOTAxMTUwM1oXDTIxMDMxNzAxMTUwM1owgc4xCzAJBgNVBAYTAlVT
# MRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQK
# ExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKTAnBgNVBAsTIE1pY3Jvc29mdCBPcGVy
# YXRpb25zIFB1ZXJ0byBSaWNvMSYwJAYDVQQLEx1UaGFsZXMgVFNTIEVTTjo4OTdB
# LUUzNTYtMTcwMTElMCMGA1UEAxMcTWljcm9zb2Z0IFRpbWUtU3RhbXAgU2Vydmlj
# ZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPK1zgSSq+MxAYo3qpCt
# QDxSMPPJy6mm/wfEJNjNUnYtLFBwl1BUS5trEk/t41ldxITKehs+ABxYqo4Qxsg3
# Gy1ugKiwHAnYiiekfC+ZhptNFgtnDZIn45zC0AlVr/6UfLtsLcHCh1XElLUHfEC0
# nBuQcM/SpYo9e3l1qY5NdMgDGxCsmCKdiZfYXIu+U0UYIBhdzmSHnB3fxZOBVcr5
# htFHEBBNt/rFJlm/A4yb8oBsp+Uf0p5QwmO/bCcdqB15JpylOhZmWs0sUfJKlK9E
# rAhBwGki2eIRFKsQBdkXS9PWpF1w2gIJRvSkDEaCf+lbGTPdSzHSbfREWOF9wY3i
# Yj8CAwEAAaOCARswggEXMB0GA1UdDgQWBBRRahZSGfrCQhCyIyGH9DkiaW7L0zAf
# BgNVHSMEGDAWgBTVYzpcijGQ80N7fEYbxTNoWoVtVTBWBgNVHR8ETzBNMEugSaBH
# hkVodHRwOi8vY3JsLm1pY3Jvc29mdC5jb20vcGtpL2NybC9wcm9kdWN0cy9NaWNU
# aW1TdGFQQ0FfMjAxMC0wNy0wMS5jcmwwWgYIKwYBBQUHAQEETjBMMEoGCCsGAQUF
# BzAChj5odHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpL2NlcnRzL01pY1RpbVN0
# YVBDQV8yMDEwLTA3LTAxLmNydDAMBgNVHRMBAf8EAjAAMBMGA1UdJQQMMAoGCCsG
# AQUFBwMIMA0GCSqGSIb3DQEBCwUAA4IBAQBPFxHIwi4vAH49w9Svmz6K3tM55RlW
# 5pPeULXdut2Rqy6Ys0+VpZsbuaEoxs6Z1C3hMbkiqZFxxyltxJpuHTyGTg61zfNI
# F5n6RsYF3s7IElDXNfZznF1/2iWc6uRPZK8rxxUJ/7emYXZCYwuUY0XjsCpP9pbR
# RKeJi6r5arSyI+NfKxvgoM21JNt1BcdlXuAecdd/k8UjxCscffanoK2n6LFw1PcZ
# lEO7NId7o+soM2C0QY5BYdghpn7uqopB6ixyFIIkDXFub+1E7GmAEwfU6VwEHL7y
# 9rNE8bd+JrQs+yAtkkHy9FmXg/PsGq1daVzX1So7CJ6nyphpuHSN3VfTMIIGcTCC
# BFmgAwIBAgIKYQmBKgAAAAAAAjANBgkqhkiG9w0BAQsFADCBiDELMAkGA1UEBhMC
# VVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNV
# BAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEyMDAGA1UEAxMpTWljcm9zb2Z0IFJv
# b3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTAwHhcNMTAwNzAxMjEzNjU1WhcN
# MjUwNzAxMjE0NjU1WjB8MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3Rv
# bjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0
# aW9uMSYwJAYDVQQDEx1NaWNyb3NvZnQgVGltZS1TdGFtcCBQQ0EgMjAxMDCCASIw
# DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKkdDbx3EYo6IOz8E5f1+n9plGt0
# VBDVpQoAgoX77XxoSyxfxcPlYcJ2tz5mK1vwFVMnBDEfQRsalR3OCROOfGEwWbEw
# RA/xYIiEVEMM1024OAizQt2TrNZzMFcmgqNFDdDq9UeBzb8kYDJYYEbyWEeGMoQe
# dGFnkV+BVLHPk0ySwcSmXdFhE24oxhr5hoC732H8RsEnHSRnEnIaIYqvS2SJUGKx
# Xf13Hz3wV3WsvYpCTUBR0Q+cBj5nf/VmwAOWRH7v0Ev9buWayrGo8noqCjHw2k4G
# kbaICDXoeByw6ZnNPOcvRLqn9NxkvaQBwSAJk3jN/LzAyURdXhacAQVPIk0CAwEA
# AaOCAeYwggHiMBAGCSsGAQQBgjcVAQQDAgEAMB0GA1UdDgQWBBTVYzpcijGQ80N7
# fEYbxTNoWoVtVTAZBgkrBgEEAYI3FAIEDB4KAFMAdQBiAEMAQTALBgNVHQ8EBAMC
# AYYwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBTV9lbLj+iiXGJo0T2UkFvX
# zpoYxDBWBgNVHR8ETzBNMEugSaBHhkVodHRwOi8vY3JsLm1pY3Jvc29mdC5jb20v
# cGtpL2NybC9wcm9kdWN0cy9NaWNSb29DZXJBdXRfMjAxMC0wNi0yMy5jcmwwWgYI
# KwYBBQUHAQEETjBMMEoGCCsGAQUFBzAChj5odHRwOi8vd3d3Lm1pY3Jvc29mdC5j
# b20vcGtpL2NlcnRzL01pY1Jvb0NlckF1dF8yMDEwLTA2LTIzLmNydDCBoAYDVR0g
# AQH/BIGVMIGSMIGPBgkrBgEEAYI3LgMwgYEwPQYIKwYBBQUHAgEWMWh0dHA6Ly93
# d3cubWljcm9zb2Z0LmNvbS9QS0kvZG9jcy9DUFMvZGVmYXVsdC5odG0wQAYIKwYB
# BQUHAgIwNB4yIB0ATABlAGcAYQBsAF8AUABvAGwAaQBjAHkAXwBTAHQAYQB0AGUA
# bQBlAG4AdAAuIB0wDQYJKoZIhvcNAQELBQADggIBAAfmiFEN4sbgmD+BcQM9naOh
# IW+z66bM9TG+zwXiqf76V20ZMLPCxWbJat/15/B4vceoniXj+bzta1RXCCtRgkQS
# +7lTjMz0YBKKdsxAQEGb3FwX/1z5Xhc1mCRWS3TvQhDIr79/xn/yN31aPxzymXlK
# kVIArzgPF/UveYFl2am1a+THzvbKegBvSzBEJCI8z+0DpZaPWSm8tv0E4XCfMkon
# /VWvL/625Y4zu2JfmttXQOnxzplmkIz/amJ/3cVKC5Em4jnsGUpxY517IW3DnKOi
# PPp/fZZqkHimbdLhnPkd/DjYlPTGpQqWhqS9nhquBEKDuLWAmyI4ILUl5WTs9/S/
# fmNZJQ96LjlXdqJxqgaKD4kWumGnEcua2A5HmoDF0M2n0O99g/DhO3EJ3110mCII
# YdqwUB5vvfHhAN/nMQekkzr3ZUd46PioSKv33nJ+YWtvd6mBy6cJrDm77MbL2IK0
# cs0d9LiFAR6A+xuJKlQ5slvayA1VmXqHczsI5pgt6o3gMy4SKfXAL1QnIffIrE7a
# KLixqduWsqdCosnPGUFN4Ib5KpqjEWYw07t0MkvfY3v1mYovG8chr1m1rtxEPJdQ
# cdeh0sVV42neV8HR3jDA/czmTfsNv11P6Z0eGTgvvM9YBS7vDaBQNdrvCScc1bN+
# NR4Iuto229Nfj950iEkSoYICzzCCAjgCAQEwgfyhgdSkgdEwgc4xCzAJBgNVBAYT
# AlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYD
# VQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKTAnBgNVBAsTIE1pY3Jvc29mdCBP
# cGVyYXRpb25zIFB1ZXJ0byBSaWNvMSYwJAYDVQQLEx1UaGFsZXMgVFNTIEVTTjo4
# OTdBLUUzNTYtMTcwMTElMCMGA1UEAxMcTWljcm9zb2Z0IFRpbWUtU3RhbXAgU2Vy
# dmljZaIjCgEBMAcGBSsOAwIaAxUADE5OKSMoNx/mYxYWap1RTOohbJ2ggYMwgYCk
# fjB8MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMH
# UmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMSYwJAYDVQQD
# Ex1NaWNyb3NvZnQgVGltZS1TdGFtcCBQQ0EgMjAxMDANBgkqhkiG9w0BAQUFAAIF
# AOPFChkwIhgPMjAyMTAyMDMxNTQwMDlaGA8yMDIxMDIwNDE1NDAwOVowdDA6Bgor
# BgEEAYRZCgQBMSwwKjAKAgUA48UKGQIBADAHAgEAAgIXmDAHAgEAAgIRyTAKAgUA
# 48ZbmQIBADA2BgorBgEEAYRZCgQCMSgwJjAMBgorBgEEAYRZCgMCoAowCAIBAAID
# B6EgoQowCAIBAAIDAYagMA0GCSqGSIb3DQEBBQUAA4GBAHeeznL2n6HWCjHH94Fl
# hcdW6TEXzq4XNgp1Gx1W9F8gJ4x+SwoV7elJZkwgGffcpHomLvIY/VSuzsl1NgtJ
# TWM2UxoqSv58BBOrl4eGhH6kkg8Ucy2tdeK5T8cHa8pMkq2j9pFd2mRG/6VMk0dl
# Xz7Uy3Z6bZqkcABMyAfuAaGbMYIDDTCCAwkCAQEwgZMwfDELMAkGA1UEBhMCVVMx
# EzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoT
# FU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEmMCQGA1UEAxMdTWljcm9zb2Z0IFRpbWUt
# U3RhbXAgUENBIDIwMTACEzMAAAEsIq9Fl3X5G+4AAAAAASwwDQYJYIZIAWUDBAIB
# BQCgggFKMBoGCSqGSIb3DQEJAzENBgsqhkiG9w0BCRABBDAvBgkqhkiG9w0BCQQx
# IgQg/QYv7yp+354WTjWUIsXWndTEzXjaYjqwYjcBxCJKjdUwgfoGCyqGSIb3DQEJ
# EAIvMYHqMIHnMIHkMIG9BCBbn/0uFFh42hTM5XOoKdXevBaiSxmYK9Ilcn9nu5ZH
# 4TCBmDCBgKR+MHwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAw
# DgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24x
# JjAkBgNVBAMTHU1pY3Jvc29mdCBUaW1lLVN0YW1wIFBDQSAyMDEwAhMzAAABLCKv
# RZd1+RvuAAAAAAEsMCIEIIfIM3YbzHswb/Kj/qq1l1cHA6QBl+gEXYanUNJomrpT
# MA0GCSqGSIb3DQEBCwUABIIBAAwdcXssUZGO7ho5+NHLjIxLtQk543aKGo+lrRMY
# Q9abE1h/AaaNJl0iGxX4IihNWyfovSfYL3L4eODUBAu68tWSxeceRfWNsb/ZZfUi
# v89hpLssI/Gf1BEgNMA4zCuIGQiC8okusVumEpAhhvCEbSiTTTtBdolTnU/CAKui
# oxaU3R9XkKh1F4oAM26+dJ1J2BLQXPs5afNvvedDsZWNQUPK1sFF3JRfzxiTrwBW
# EJRyflev9gyDoqCHzippgb+6+eti1WTkcA9Q49GIT11S6LOAVqkSC9N7Nqf8ksh8
# ARdwT8jigpsm+mj7lrVU9upDkhVYhKeO8oiZq95Q53Zkteo=
# SIG # End signature block


@@ -40,7 +40,7 @@ if [ -t 1 ] && command -v tput > /dev/null; then
fi
say_warning() {
printf "%b\n" "${yellow:-}dotnet_install: Warning: $1${normal:-}"
printf "%b\n" "${yellow:-}dotnet_install: Warning: $1${normal:-}" >&3
}
say_err() {
@@ -183,6 +183,9 @@ get_current_os_name() {
elif is_musl_based_distro; then
echo "linux-musl"
return 0
elif [ "$linux_platform_name" = "linux-musl" ]; then
echo "linux-musl"
return 0
else
echo "linux"
return 0
@@ -241,42 +244,6 @@ check_min_reqs() {
return 0
}
check_pre_reqs() {
eval $invocation
if [ "${DOTNET_INSTALL_SKIP_PREREQS:-}" = "1" ]; then
return 0
fi
if [ "$(uname)" = "Linux" ]; then
if is_musl_based_distro; then
if ! command -v scanelf > /dev/null; then
say_warning "scanelf not found, please install pax-utils package."
return 0
fi
LDCONFIG_COMMAND="scanelf --ldpath -BF '%f'"
[ -z "$($LDCONFIG_COMMAND 2>/dev/null | grep libintl)" ] && say_warning "Unable to locate libintl. Probable prerequisite missing; install libintl (or gettext)."
else
if [ ! -x "$(command -v ldconfig)" ]; then
say_verbose "ldconfig is not in PATH, trying /sbin/ldconfig."
LDCONFIG_COMMAND="/sbin/ldconfig"
else
LDCONFIG_COMMAND="ldconfig"
fi
local librarypath=${LD_LIBRARY_PATH:-}
LDCONFIG_COMMAND="$LDCONFIG_COMMAND -NXv ${librarypath//:/ }"
fi
[ -z "$($LDCONFIG_COMMAND 2>/dev/null | grep zlib)" ] && say_warning "Unable to locate zlib. Probable prerequisite missing; install zlib."
[ -z "$($LDCONFIG_COMMAND 2>/dev/null | grep ssl)" ] && say_warning "Unable to locate libssl. Probable prerequisite missing; install libssl."
[ -z "$($LDCONFIG_COMMAND 2>/dev/null | grep libicu)" ] && say_warning "Unable to locate libicu. Probable prerequisite missing; install libicu."
[ -z "$($LDCONFIG_COMMAND 2>/dev/null | grep lttng)" ] && say_warning "Unable to locate liblttng. Probable prerequisite missing; install libcurl."
[ -z "$($LDCONFIG_COMMAND 2>/dev/null | grep libcurl)" ] && say_warning "Unable to locate libcurl. Probable prerequisite missing; install libcurl."
fi
return 0
}
# args:
# input - $1
to_lowercase() {
@@ -332,11 +299,11 @@ get_machine_architecture() {
if command -v uname > /dev/null; then
CPUName=$(uname -m)
case $CPUName in
armv7l)
armv*l)
echo "arm"
return 0
;;
aarch64)
aarch64|arm64)
echo "arm64"
return 0
;;
@@ -373,10 +340,34 @@ get_normalized_architecture_from_architecture() {
;;
esac
say_err "Architecture \`$architecture\` not supported. If you think this is a bug, report it at https://github.com/dotnet/sdk/issues"
say_err "Architecture \`$architecture\` not supported. If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues"
return 1
}
# args:
# user_defined_os - $1
get_normalized_os() {
eval $invocation
local osname="$(to_lowercase "$1")"
if [ ! -z "$osname" ]; then
case "$osname" in
osx | freebsd | rhel.6 | linux-musl | linux)
echo "$osname"
return 0
;;
*)
say_err "'$user_defined_os' is not a supported value for --os option, supported values are: osx, linux, linux-musl, freebsd, rhel.6. If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues."
return 1
;;
esac
else
osname="$(get_current_os_name)" || return 1
fi
echo "$osname"
return 0
}
# The version text returned from the feeds is a 1-line or 2-line string:
# For the SDK and the dotnet runtime (2 lines):
# Line 1: # commit_hash
@@ -418,14 +409,12 @@ is_dotnet_package_installed() {
# azure_feed - $1
# channel - $2
# normalized_architecture - $3
# coherent - $4
get_latest_version_info() {
eval $invocation
local azure_feed="$1"
local channel="$2"
local normalized_architecture="$3"
local coherent="$4"
local version_file_url=null
if [[ "$runtime" == "dotnet" ]]; then
@@ -433,11 +422,7 @@ get_latest_version_info() {
elif [[ "$runtime" == "aspnetcore" ]]; then
version_file_url="$uncached_feed/aspnetcore/Runtime/$channel/latest.version"
elif [ -z "$runtime" ]; then
if [ "$coherent" = true ]; then
version_file_url="$uncached_feed/Sdk/$channel/latest.coherent.version"
else
version_file_url="$uncached_feed/Sdk/$channel/latest.version"
fi
else
say_err "Invalid value for \$runtime"
return 1
@@ -468,7 +453,6 @@ parse_jsonfile_for_version() {
sdk_list=$(echo $sdk_section | awk -F"[{}]" '{print $2}')
sdk_list=${sdk_list//[\" ]/}
sdk_list=${sdk_list//,/$'\n'}
sdk_list="$(echo -e "${sdk_list}" | tr -d '[[:space:]]')"
local version_info=""
while read -r line; do
@@ -505,26 +489,16 @@ get_specific_version_from_version() {
local json_file="$5"
if [ -z "$json_file" ]; then
case "$version" in
latest)
if [[ "$version" == "latest" ]]; then
local version_info
version_info="$(get_latest_version_info "$azure_feed" "$channel" "$normalized_architecture" false)" || return 1
say_verbose "get_specific_version_from_version: version_info=$version_info"
echo "$version_info" | get_version_from_version_info
return 0
;;
coherent)
local version_info
version_info="$(get_latest_version_info "$azure_feed" "$channel" "$normalized_architecture" true)" || return 1
say_verbose "get_specific_version_from_version: version_info=$version_info"
echo "$version_info" | get_version_from_version_info
return 0
;;
*)
else
echo "$version"
return 0
;;
esac
fi
else
local version_info
version_info="$(parse_jsonfile_for_version "$json_file")" || return 1
@@ -538,6 +512,7 @@ get_specific_version_from_version() {
# channel - $2
# normalized_architecture - $3
# specific_version - $4
# normalized_os - $5
construct_download_link() {
eval $invocation
@@ -545,17 +520,16 @@ construct_download_link() {
local channel="$2"
local normalized_architecture="$3"
local specific_version="${4//[$'\t\r\n']}"
local osname
osname="$(get_current_os_name)" || return 1
local specific_product_version="$(get_specific_product_version "$1" "$4")"
local osname="$5"
local download_link=null
if [[ "$runtime" == "dotnet" ]]; then
download_link="$azure_feed/Runtime/$specific_version/dotnet-runtime-$specific_version-$osname-$normalized_architecture.tar.gz"
download_link="$azure_feed/Runtime/$specific_version/dotnet-runtime-$specific_product_version-$osname-$normalized_architecture.tar.gz"
elif [[ "$runtime" == "aspnetcore" ]]; then
download_link="$azure_feed/aspnetcore/Runtime/$specific_version/aspnetcore-runtime-$specific_version-$osname-$normalized_architecture.tar.gz"
download_link="$azure_feed/aspnetcore/Runtime/$specific_version/aspnetcore-runtime-$specific_product_version-$osname-$normalized_architecture.tar.gz"
elif [ -z "$runtime" ]; then
download_link="$azure_feed/Sdk/$specific_version/dotnet-sdk-$specific_version-$osname-$normalized_architecture.tar.gz"
download_link="$azure_feed/Sdk/$specific_version/dotnet-sdk-$specific_product_version-$osname-$normalized_architecture.tar.gz"
else
return 1
fi
@@ -564,6 +538,50 @@ construct_download_link() {
return 0
}
# args:
# azure_feed - $1
# specific_version - $2
get_specific_product_version() {
# If we find a 'productVersion.txt' at the root of any folder, we'll use its contents
# to resolve the version of what's in the folder, superseding the specified version.
eval $invocation
local azure_feed="$1"
local specific_version="${2//[$'\t\r\n']}"
local specific_product_version=$specific_version
local download_link=null
if [[ "$runtime" == "dotnet" ]]; then
download_link="$azure_feed/Runtime/$specific_version/productVersion.txt${feed_credential}"
elif [[ "$runtime" == "aspnetcore" ]]; then
download_link="$azure_feed/aspnetcore/Runtime/$specific_version/productVersion.txt${feed_credential}"
elif [ -z "$runtime" ]; then
download_link="$azure_feed/Sdk/$specific_version/productVersion.txt${feed_credential}"
else
return 1
fi
if machine_has "curl"
then
specific_product_version=$(curl -s --fail "$download_link")
if [ $? -ne 0 ]
then
specific_product_version=$specific_version
fi
elif machine_has "wget"
then
specific_product_version=$(wget -qO- "$download_link")
if [ $? -ne 0 ]
then
specific_product_version=$specific_version
fi
fi
specific_product_version="${specific_product_version//[$'\t\r\n']}"
echo "$specific_product_version"
return 0
}
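The productVersion.txt lookup above can be exercised on its own. A minimal sketch under the same assumptions the function makes (the feed URL and version below are placeholders, not values taken from this diff):

# Resolve the display version for a given SDK build, falling back to the
# requested version when productVersion.txt is missing or unreachable.
azure_feed="https://example-feed.invalid/dotnet"   # placeholder feed root
specific_version="3.1.100"                         # placeholder version
product_version="$(curl -s --fail "$azure_feed/Sdk/$specific_version/productVersion.txt")" || product_version="$specific_version"
product_version="${product_version//[$'\t\r\n']}"
echo "$product_version"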
# args:
# azure_feed - $1
# channel - $2
@@ -684,11 +702,31 @@ extract_dotnet_package() {
find "$temp_out_path" -type f | grep -Ev "$folders_with_version_regex" | copy_files_or_dirs_from_list "$temp_out_path" "$out_path" "$override_non_versioned_files"
rm -rf "$temp_out_path"
rm -f "$zip_path" && say_verbose "Temporary zip file $zip_path was removed"
if [ "$failed" = true ]; then
say_err "Extraction failed"
return 1
fi
return 0
}
get_http_header_curl() {
eval $invocation
local remote_path="$1"
remote_path_with_credential="${remote_path}${feed_credential}"
curl_options="-I -sSL --retry 5 --retry-delay 2 --connect-timeout 15 "
curl $curl_options "$remote_path_with_credential" || return 1
return 0
}
get_http_header_wget() {
eval $invocation
local remote_path="$1"
remote_path_with_credential="${remote_path}${feed_credential}"
wget_options="-q -S --spider --tries 5 --waitretry 2 --connect-timeout 15 "
wget $wget_options "$remote_path_with_credential" 2>&1 || return 1
return 0
}
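These header helpers exist so the download functions below can surface an HTTP status code when a transfer fails. A minimal sketch of the same status extraction, assuming curl is available (the URL is a placeholder):

# Fetch only the response headers and pull the status code from the last
# HTTP status line, mirroring the awk pattern used by downloadcurl below.
url="https://example.invalid/missing-file"   # placeholder
headers="$(curl -I -sSL --retry 5 --retry-delay 2 --connect-timeout 15 "$url")"
http_code="$(echo "$headers" | awk '/^HTTP/{print $2}' | tail -1)"
echo "HTTP status: ${http_code:-unknown}"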
# args:
@@ -706,13 +744,30 @@ download() {
fi
local failed=false
local attempts=0
while [ $attempts -lt 3 ]; do
attempts=$((attempts+1))
failed=false
if machine_has "curl"; then
downloadcurl "$remote_path" "$out_path" || failed=true
elif machine_has "wget"; then
downloadwget "$remote_path" "$out_path" || failed=true
else
failed=true
say_err "Missing dependency: neither curl nor wget was found."
exit 1
fi
if [ "$failed" = false ] || [ $attempts -ge 3 ] || { [ ! -z $http_code ] && [ $http_code = "404" ]; }; then
break
fi
say "Download attempt #$attempts has failed: $http_code $download_error_msg"
say "Attempt #$((attempts+1)) will start in $((attempts*10)) seconds."
sleep $((attempts*20))
done
if [ "$failed" = true ]; then
say_verbose "Download failed: $remote_path"
return 1
@@ -720,44 +775,60 @@ download() {
return 0
}
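Callers are expected to read the $http_code and $download_error_msg globals that download sets on failure, which is exactly what install_dotnet does further down. A minimal usage sketch (the URL and output path are placeholders):

# Try a download and distinguish a missing asset (404) from other failures.
if ! download "https://example.invalid/dotnet-sdk.tar.gz" "/tmp/dotnet-sdk.tar.gz"; then
    if [ "$http_code" = "404" ]; then
        echo "The resource is not available."
    else
        echo "${download_error_msg:-Download failed for an unknown reason.}"
    fi
fi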
# Updates global variables $http_code and $download_error_msg
downloadcurl() {
eval $invocation
unset http_code
unset download_error_msg
local remote_path="$1"
local out_path="${2:-}"
# Append feed_credential as late as possible before calling curl to avoid logging feed_credential
remote_path="${remote_path}${feed_credential}"
local remote_path_with_credential="${remote_path}${feed_credential}"
local curl_options="--retry 20 --retry-delay 2 --connect-timeout 15 -sSL -f --create-dirs "
local failed=false
if [ -z "$out_path" ]; then
curl $curl_options "$remote_path" || failed=true
curl $curl_options "$remote_path_with_credential" || failed=true
else
curl $curl_options -o "$out_path" "$remote_path" || failed=true
curl $curl_options -o "$out_path" "$remote_path_with_credential" || failed=true
fi
if [ "$failed" = true ]; then
say_verbose "Curl download failed"
local response=$(get_http_header_curl $remote_path_with_credential)
http_code=$( echo "$response" | awk '/^HTTP/{print $2}' | tail -1 )
download_error_msg="Unable to download $remote_path."
if [[ $http_code != 2* ]]; then
download_error_msg+=" Returned HTTP status code: $http_code."
fi
say_verbose "$download_error_msg"
return 1
fi
return 0
}
# Updates global variables $http_code and $download_error_msg
downloadwget() {
eval $invocation
unset http_code
unset download_error_msg
local remote_path="$1"
local out_path="${2:-}"
# Append feed_credential as late as possible before calling wget to avoid logging feed_credential
remote_path="${remote_path}${feed_credential}"
local remote_path_with_credential="${remote_path}${feed_credential}"
local wget_options="--tries 20 --waitretry 2 --connect-timeout 15 "
local failed=false
if [ -z "$out_path" ]; then
wget -q $wget_options -O - "$remote_path" || failed=true
wget -q $wget_options -O - "$remote_path_with_credential" || failed=true
else
wget $wget_options -O "$out_path" "$remote_path" || failed=true
wget $wget_options -O "$out_path" "$remote_path_with_credential" || failed=true
fi
if [ "$failed" = true ]; then
say_verbose "Wget download failed"
local response=$(get_http_header_wget $remote_path_with_credential)
http_code=$( echo "$response" | awk '/^ HTTP/{print $2}' | tail -1 )
download_error_msg="Unable to download $remote_path."
if [[ $http_code != 2* ]]; then
download_error_msg+=" Returned HTTP status code: $http_code."
fi
say_verbose "$download_error_msg"
return 1
fi
return 0
@@ -770,14 +841,18 @@ calculate_vars() {
normalized_architecture="$(get_normalized_architecture_from_architecture "$architecture")"
say_verbose "normalized_architecture=$normalized_architecture"
normalized_os="$(get_normalized_os "$user_defined_os")"
say_verbose "normalized_os=$normalized_os"
specific_version="$(get_specific_version_from_version "$azure_feed" "$channel" "$normalized_architecture" "$version" "$json_file")"
specific_product_version="$(get_specific_product_version "$azure_feed" "$specific_version")"
say_verbose "specific_version=$specific_version"
if [ -z "$specific_version" ]; then
say_err "Could not resolve version information."
return 1
fi
download_link="$(construct_download_link "$azure_feed" "$channel" "$normalized_architecture" "$specific_version")"
download_link="$(construct_download_link "$azure_feed" "$channel" "$normalized_architecture" "$specific_version" "$normalized_os")"
say_verbose "Constructed primary named payload URL: $download_link"
legacy_download_link="$(construct_legacy_download_link "$azure_feed" "$channel" "$normalized_architecture" "$specific_version")" || valid_legacy_download_link=false
@@ -822,38 +897,74 @@ install_dotnet() {
zip_path="$(mktemp "$temporary_file_template")"
say_verbose "Zip path: $zip_path"
say "Downloading link: $download_link"
# Failures from the primary (non-legacy) link are expected when the asset is only available from the legacy link.
# Do not output to stderr, since output to stderr is considered an error.
say "Downloading primary link $download_link"
# The download function will set variables $http_code and $download_error_msg in case of failure.
download "$download_link" "$zip_path" 2>&1 || download_failed=true
# if the download fails, download the legacy_download_link
if [ "$download_failed" = true ]; then
say "Cannot download: $download_link"
primary_path_http_code="$http_code"; primary_path_download_error_msg="$download_error_msg"
case $primary_path_http_code in
404)
say "The resource at $download_link is not available."
;;
*)
say "$primary_path_download_error_msg"
;;
esac
rm -f "$zip_path" 2>&1 && say_verbose "Temporary zip file $zip_path was removed"
if [ "$valid_legacy_download_link" = true ]; then
download_failed=false
download_link="$legacy_download_link"
zip_path="$(mktemp "$temporary_file_template")"
say_verbose "Legacy zip path: $zip_path"
say "Downloading legacy link: $download_link"
say "Downloading legacy link $download_link"
# The download function will set variables $http_code and $download_error_msg in case of failure.
download "$download_link" "$zip_path" 2>&1 || download_failed=true
if [ "$download_failed" = true ]; then
say "Cannot download: $download_link"
legacy_path_http_code="$http_code"; legacy_path_download_error_msg="$download_error_msg"
case $legacy_path_http_code in
404)
say "The resource at $download_link is not available."
;;
*)
say "$legacy_path_download_error_msg"
;;
esac
rm -f "$zip_path" 2>&1 && say_verbose "Temporary zip file $zip_path was removed"
fi
fi
fi
if [ "$download_failed" = true ]; then
say_err "Could not find/download: \`$asset_name\` with version = $specific_version"
if [[ "$primary_path_http_code" = "404" && ( "$valid_legacy_download_link" = false || "$legacy_path_http_code" = "404") ]]; then
say_err "Could not find \`$asset_name\` with version = $specific_version"
say_err "Refer to: https://aka.ms/dotnet-os-lifecycle for information on .NET Core support"
else
say_err "Could not download: \`$asset_name\` with version = $specific_version"
# 404-NotFound is an expected response if it comes from only one of the links; do not show that error.
# If the primary path is available (not 404-NotFound), show the primary error; otherwise show the legacy error.
if [ "$primary_path_http_code" != "404" ]; then
say_err "$primary_path_download_error_msg"
return 1
fi
if [[ "$valid_legacy_download_link" = true && "$legacy_path_http_code" != "404" ]]; then
say_err "$legacy_path_download_error_msg"
return 1
fi
fi
return 1
fi
say "Extracting zip from $download_link"
extract_dotnet_package "$zip_path" "$install_root"
extract_dotnet_package "$zip_path" "$install_root" || return 1
# Check if the SDK version is installed; if not, fail the installation.
# if the version contains "RTM" or "servicing"; check if a 'release-type' SDK version is installed.
@@ -869,12 +980,14 @@ install_dotnet() {
fi
# Check if the standard SDK version is installed.
say_verbose "Checking installation: version = $specific_version"
if is_dotnet_package_installed "$install_root" "$asset_relative_path" "$specific_version"; then
say_verbose "Checking installation: version = $specific_product_version"
if is_dotnet_package_installed "$install_root" "$asset_relative_path" "$specific_product_version"; then
return 0
fi
say_err "\`$asset_name\` with version = $specific_version failed to install with an unknown error."
# Version verification failed. More likely something is wrong either with the downloaded content or with the verification algorithm.
say_err "Failed to verify the version of installed \`$asset_name\`.\nInstallation source: $download_link.\nInstallation location: $install_root.\nReport the bug at https://github.com/dotnet/install-scripts/issues."
say_err "\`$asset_name\` with version = $specific_product_version failed to install with an unknown error."
return 1
}
@@ -900,6 +1013,7 @@ runtime=""
runtime_id=""
override_non_versioned_files=true
non_dynamic_parameters=""
user_defined_os=""
while [ $# -ne 0 ]
do
@@ -921,6 +1035,10 @@ do
shift
architecture="$1"
;;
--os|-[Oo][SS])
shift
user_defined_os="$1"
;;
--shared-runtime|-[Ss]hared[Rr]untime)
say_warning "The --shared-runtime flag is obsolete and may be removed in a future version of this script. The recommended usage is to specify '--runtime dotnet'."
if [ -z "$runtime" ]; then
@@ -972,6 +1090,7 @@ do
shift
runtime_id="$1"
non_dynamic_parameters+=" $name "\""$1"\"""
say_warning "Use of --runtime-id is obsolete and should be limited to the versions below 2.1. To override architecture, use --architecture option instead. To override OS, use --os option instead."
;;
--jsonfile|-[Jj][Ss]on[Ff]ile)
shift
@@ -1004,8 +1123,6 @@ do
echo " -Version"
echo " Possible values:"
echo " - latest - most latest build on specific channel"
echo " - coherent - most latest coherent build on specific channel"
echo " coherent applies only to SDK downloads"
echo " - 3-part version in a format A.B.C - represents specific version of build"
echo " examples: 2.0.0-preview2-006120; 1.1.0"
echo " -i,--install-dir <DIR> Install under specified location (see Install Location below)"
@@ -1013,6 +1130,11 @@ do
echo " --architecture <ARCHITECTURE> Architecture of dotnet binaries to be installed, Defaults to \`$architecture\`."
echo " --arch,-Architecture,-Arch"
echo " Possible values: x64, arm, and arm64"
echo " --os <system> Specifies operating system to be used when selecting the installer."
echo " Overrides the OS determination approach used by the script. Supported values: osx, linux, linux-musl, freebsd, rhel.6."
echo " In case any other value is provided, the platform will be determined by the script based on machine configuration."
echo " Not supported for legacy links. Use --runtime-id to specify platform for legacy links."
echo " Refer to: https://aka.ms/dotnet-os-lifecycle for more information."
echo " --runtime <RUNTIME> Installs a shared runtime only, without the SDK."
echo " -Runtime"
echo " Possible values:"
@@ -1029,14 +1151,15 @@ do
echo " --no-cdn,-NoCdn Disable downloading from the Azure CDN, and use the uncached feed directly."
echo " --jsonfile <JSONFILE> Determines the SDK version from a user specified global.json file."
echo " Note: global.json must have a value for 'SDK:Version'"
echo " --runtime-id Installs the .NET Tools for the given platform (use linux-x64 for portable linux)."
echo " -RuntimeId"
echo " -?,--?,-h,--help,-Help Shows this help message"
echo ""
echo "Obsolete parameters:"
echo " --shared-runtime The recommended alternative is '--runtime dotnet'."
echo " This parameter is obsolete and may be removed in a future version of this script."
echo " Installs just the shared runtime bits, not the entire SDK."
echo " --runtime-id Installs the .NET Tools for the given platform (use linux-x64 for portable linux)."
echo " -RuntimeId" The parameter is obsolete and may be removed in a future version of this script. Should be used only for versions below 2.1.
echo " For primary links to override OS or/and architecture, use --os and --architecture option instead."
echo ""
echo "Install Location:"
echo " Location is chosen in following order:"
@@ -1058,6 +1181,11 @@ if [ "$no_cdn" = true ]; then
azure_feed="$uncached_feed"
fi
say "Note that the intended use of this script is for Continuous Integration (CI) scenarios, where:"
say "- The SDK needs to be installed without user interaction and without admin rights."
say "- The SDK installation doesn't need to persist across multiple CI runs."
say "To set up a development environment or to run apps, use installers rather than this script. Visit https://dotnet.microsoft.com/download to get the installer.\n"
check_min_reqs
calculate_vars
script_name=$(basename "$0")
@@ -1068,7 +1196,7 @@ if [ "$dry_run" = true ]; then
if [ "$valid_legacy_download_link" = true ]; then
say "Legacy named payload URL: $legacy_download_link"
fi
repeatable_command="./$script_name --version "\""$specific_version"\"" --install-dir "\""$install_root"\"" --architecture "\""$normalized_architecture"\"""
repeatable_command="./$script_name --version "\""$specific_version"\"" --install-dir "\""$install_root"\"" --architecture "\""$normalized_architecture"\"" --os "\""$normalized_os"\"""
if [[ "$runtime" == "dotnet" ]]; then
repeatable_command+=" --runtime "\""dotnet"\"""
elif [[ "$runtime" == "aspnetcore" ]]; then
@@ -1079,7 +1207,6 @@ if [ "$dry_run" = true ]; then
exit 0
fi
check_pre_reqs
install_dotnet
bin_path="$(get_absolute_path "$(combine_paths "$install_root" "$bin_folder_relative_path")")"
@@ -1090,4 +1217,6 @@ else
say "Binaries of dotnet can be found in $bin_path"
fi
say "Note that the script does not resolve dependencies during installation."
say "To check the list of dependencies, go to https://docs.microsoft.com/dotnet/core/install, select your operating system and check the \"Dependencies\" section."
say "Installation finished successfully."


@@ -1,6 +1,6 @@
{
"plugins": ["jest", "@typescript-eslint"],
"extends": ["plugin:github/es6"],
"plugins": ["@typescript-eslint"],
"extends": ["plugin:github/recommended"],
"parser": "@typescript-eslint/parser",
"parserOptions": {
"ecmaVersion": 9,
@@ -17,13 +17,16 @@
"@typescript-eslint/no-require-imports": "error",
"@typescript-eslint/array-type": "error",
"@typescript-eslint/await-thenable": "error",
"@typescript-eslint/ban-ts-ignore": "error",
"@typescript-eslint/naming-convention": [
"error",
{
"selector": "default",
"format": ["camelCase"]
}
],
"camelcase": "off",
"@typescript-eslint/camelcase": "error",
"@typescript-eslint/class-name-casing": "error",
"@typescript-eslint/explicit-function-return-type": ["error", {"allowExpressions": true}],
"@typescript-eslint/func-call-spacing": ["error", "never"],
"@typescript-eslint/generic-type-naming": ["error", "^[A-Z][A-Za-z]*$"],
"@typescript-eslint/no-array-constructor": "error",
"@typescript-eslint/no-empty-interface": "error",
"@typescript-eslint/no-explicit-any": "error",
@@ -33,7 +36,6 @@
"@typescript-eslint/no-misused-new": "error",
"@typescript-eslint/no-namespace": "error",
"@typescript-eslint/no-non-null-assertion": "warn",
"@typescript-eslint/no-object-literal-type-assertion": "error",
"@typescript-eslint/no-unnecessary-qualifier": "error",
"@typescript-eslint/no-unnecessary-type-assertion": "error",
"@typescript-eslint/no-useless-constructor": "error",
@@ -41,19 +43,19 @@
"@typescript-eslint/prefer-for-of": "warn",
"@typescript-eslint/prefer-function-type": "warn",
"@typescript-eslint/prefer-includes": "error",
"@typescript-eslint/prefer-interface": "error",
"@typescript-eslint/prefer-string-starts-ends-with": "error",
"@typescript-eslint/promise-function-async": "error",
"@typescript-eslint/require-array-sort-compare": "error",
"@typescript-eslint/restrict-plus-operands": "error",
"semi": "off",
"@typescript-eslint/semi": ["error", "never"],
"@typescript-eslint/type-annotation-spacing": "error",
"@typescript-eslint/unbound-method": "error"
"@typescript-eslint/unbound-method": "error",
"filenames/match-regex" : "off",
"github/no-then" : 1, // warning
"semi": "off"
},
"env": {
"node": true,
"es6": true,
"jest/globals": true
"es6": true
}
}

File diff suppressed because it is too large.


@@ -25,10 +25,10 @@
},
"devDependencies": {
"@types/node": "^12.7.12",
"@typescript-eslint/parser": "^2.8.0",
"@typescript-eslint/parser": "^5.15.0",
"@zeit/ncc": "^0.20.5",
"eslint": "^6.8.0",
"eslint-plugin-github": "^2.0.0",
"eslint": "^8.11.0",
"eslint-plugin-github": "^4.3.5",
"prettier": "^1.19.1",
"typescript": "^3.6.4"
}


@@ -1,9 +1,9 @@
import * as glob from '@actions/glob'
import * as crypto from 'crypto'
import * as fs from 'fs'
import * as glob from '@actions/glob'
import * as path from 'path'
import * as stream from 'stream'
import * as util from 'util'
import * as path from 'path'
async function run(): Promise<void> {
// arg0 -> node
@@ -45,7 +45,7 @@ async function run(): Promise<void> {
result.end()
if (hasMatch) {
console.log(`Find ${count} files to hash.`)
console.log(`Found ${count} files to hash.`)
console.error(`__OUTPUT__${result.digest('hex')}__OUTPUT__`)
} else {
console.error(`__OUTPUT____OUTPUT__`)
@@ -53,3 +53,11 @@ async function run(): Promise<void> {
}
run()
.then(out => {
console.log(out)
process.exit(0)
})
.catch(err => {
console.error(err)
process.exit(1)
})


@@ -3,7 +3,8 @@ PACKAGERUNTIME=$1
PRECACHE=$2
NODE_URL=https://nodejs.org/dist
NODE12_VERSION="12.13.1"
NODE12_VERSION="12.22.7"
NODE16_VERSION="16.13.0"
get_abs_path() {
# exploits the fact that pwd will print abs path when no args
@@ -126,6 +127,8 @@ function acquireExternalTool() {
if [[ "$PACKAGERUNTIME" == "win-x64" || "$PACKAGERUNTIME" == "win-x86" ]]; then
acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/$PACKAGERUNTIME/node.exe" node12/bin
acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/$PACKAGERUNTIME/node.lib" node12/bin
acquireExternalTool "$NODE_URL/v${NODE16_VERSION}/$PACKAGERUNTIME/node.exe" node16/bin
acquireExternalTool "$NODE_URL/v${NODE16_VERSION}/$PACKAGERUNTIME/node.lib" node16/bin
if [[ "$PRECACHE" != "" ]]; then
acquireExternalTool "https://github.com/microsoft/vswhere/releases/download/2.6.7/vswhere.exe" vswhere
fi
@@ -134,18 +137,28 @@ fi
# Download the external tools only for OSX.
if [[ "$PACKAGERUNTIME" == "osx-x64" ]]; then
acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/node-v${NODE12_VERSION}-darwin-x64.tar.gz" node12 fix_nested_dir
acquireExternalTool "$NODE_URL/v${NODE16_VERSION}/node-v${NODE16_VERSION}-darwin-x64.tar.gz" node16 fix_nested_dir
fi
if [[ "$PACKAGERUNTIME" == "osx-arm64" ]]; then
# node.js v12 doesn't support macOS on arm64.
acquireExternalTool "$NODE_URL/v${NODE16_VERSION}/node-v${NODE16_VERSION}-darwin-arm64.tar.gz" node16 fix_nested_dir
fi
# Download the external tools for Linux PACKAGERUNTIMEs.
if [[ "$PACKAGERUNTIME" == "linux-x64" ]]; then
acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/node-v${NODE12_VERSION}-linux-x64.tar.gz" node12 fix_nested_dir
acquireExternalTool "https://vstsagenttools.blob.core.windows.net/tools/nodejs/${NODE12_VERSION}/alpine/x64/node-${NODE12_VERSION}-alpine-x64.tar.gz" node12_alpine
acquireExternalTool "https://vstsagenttools.blob.core.windows.net/tools/nodejs/${NODE12_VERSION}/alpine/x64/node-v${NODE12_VERSION}-alpine-x64.tar.gz" node12_alpine
acquireExternalTool "$NODE_URL/v${NODE16_VERSION}/node-v${NODE16_VERSION}-linux-x64.tar.gz" node16 fix_nested_dir
acquireExternalTool "https://vstsagenttools.blob.core.windows.net/tools/nodejs/${NODE16_VERSION}/alpine/x64/node-v${NODE16_VERSION}-alpine-x64.tar.gz" node16_alpine
fi
if [[ "$PACKAGERUNTIME" == "linux-arm64" ]]; then
acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/node-v${NODE12_VERSION}-linux-arm64.tar.gz" node12 fix_nested_dir
acquireExternalTool "$NODE_URL/v${NODE16_VERSION}/node-v${NODE16_VERSION}-linux-arm64.tar.gz" node16 fix_nested_dir
fi
if [[ "$PACKAGERUNTIME" == "linux-arm" ]]; then
acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/node-v${NODE12_VERSION}-linux-armv7l.tar.gz" node12 fix_nested_dir
acquireExternalTool "$NODE_URL/v${NODE16_VERSION}/node-v${NODE16_VERSION}-linux-armv7l.tar.gz" node16 fix_nested_dir
fi
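The acquireExternalTool pattern generalizes to additional platforms: pass the download URL, the target directory under externals, and optionally fix_nested_dir when the tarball wraps its contents in a versioned top-level folder. A purely hypothetical sketch for an extra platform (the runtime identifier and URL are illustrative only):

# Hypothetical only: fetch a Node 16 build for an additional Linux platform,
# passing fix_nested_dir just like the other tarball downloads above.
if [[ "$PACKAGERUNTIME" == "linux-s390x" ]]; then
    acquireExternalTool "$NODE_URL/v${NODE16_VERSION}/node-v${NODE16_VERSION}-linux-s390x.tar.gz" node16 fix_nested_dir
fi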


@@ -3,89 +3,135 @@
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
var childProcess = require("child_process");
var path = require("path")
var path = require("path");
var supported = ['linux', 'darwin']
var supported = ["linux", "darwin"];
if (supported.indexOf(process.platform) == -1) {
console.log('Unsupported platform: ' + process.platform);
console.log('Supported platforms are: ' + supported.toString());
console.log("Unsupported platform: " + process.platform);
console.log("Supported platforms are: " + supported.toString());
process.exit(1);
}
var stopping = false;
var listener = null;
var runService = function() {
var listenerExePath = path.join(__dirname, '../bin/Runner.Listener');
var exitServiceAfterNFailures = Number(
process.env.GITHUB_ACTIONS_SERVICE_EXIT_AFTER_N_FAILURES
);
if (exitServiceAfterNFailures <= 0) {
exitServiceAfterNFailures = NaN;
}
var consecutiveFailureCount = 0;
var gracefulShutdown = function () {
console.log("Shutting down runner listener");
stopping = true;
if (listener) {
console.log("Sending SIGINT to runner listener to stop");
listener.kill("SIGINT");
console.log("Sending SIGKILL to runner listener");
setTimeout(() => listener.kill("SIGKILL"), 30000).unref();
}
};
var runService = function () {
var listenerExePath = path.join(__dirname, "../bin/Runner.Listener");
var interactive = process.argv[2] === "interactive";
if(!stopping) {
if (!stopping) {
try {
if (interactive) {
console.log('Starting Runner listener interactively');
listener = childProcess.spawn(listenerExePath, ['run'], { env: process.env });
console.log("Starting Runner listener interactively");
listener = childProcess.spawn(listenerExePath, ["run"], {
env: process.env,
});
} else {
console.log('Starting Runner listener with startup type: service');
listener = childProcess.spawn(listenerExePath, ['run', '--startuptype', 'service'], { env: process.env });
console.log("Starting Runner listener with startup type: service");
listener = childProcess.spawn(
listenerExePath,
["run", "--startuptype", "service"],
{ env: process.env }
);
}
console.log('Started listener process');
console.log(`Started listener process, pid: ${listener.pid}`);
listener.stdout.on('data', (data) => {
process.stdout.write(data.toString('utf8'));
listener.stdout.on("data", (data) => {
if (data.toString("utf8").includes("Listening for Jobs")) {
consecutiveFailureCount = 0;
}
process.stdout.write(data.toString("utf8"));
});
listener.stderr.on('data', (data) => {
process.stdout.write(data.toString('utf8'));
listener.stderr.on("data", (data) => {
process.stdout.write(data.toString("utf8"));
});
listener.on('close', (code) => {
listener.on("error", (err) => {
console.log(`Runner listener failed to start with error ${err.message}`);
});
listener.on("close", (code) => {
console.log(`Runner listener exited with error code ${code}`);
if (code === 0) {
console.log('Runner listener exit with 0 return code, stop the service, no retry needed.');
console.log(
"Runner listener exit with 0 return code, stop the service, no retry needed."
);
stopping = true;
} else if (code === 1) {
console.log('Runner listener exit with terminated error, stop the service, no retry needed.');
console.log(
"Runner listener exit with terminated error, stop the service, no retry needed."
);
stopping = true;
} else if (code === 2) {
console.log('Runner listener exit with retryable error, re-launch runner in 5 seconds.');
} else if (code === 3) {
console.log('Runner listener exit because of updating, re-launch runner in 5 seconds.');
console.log(
"Runner listener exit with retryable error, re-launch runner in 5 seconds."
);
consecutiveFailureCount = 0;
} else if (code === 3 || code === 4) {
console.log(
"Runner listener exit because of updating, re-launch runner in 5 seconds."
);
consecutiveFailureCount = 0;
} else {
console.log('Runner listener exit with undefined return code, re-launch runner in 5 seconds.');
var messagePrefix = "Runner listener exit with undefined return code";
consecutiveFailureCount++;
if (
!isNaN(exitServiceAfterNFailures) &&
consecutiveFailureCount >= exitServiceAfterNFailures
) {
console.error(
`${messagePrefix}, exiting service after ${consecutiveFailureCount} consecutive failures`
);
gracefulShutdown();
return;
} else {
console.log(`${messagePrefix}, re-launch runner in 5 seconds.`);
}
}
if(!stopping) {
if (!stopping) {
setTimeout(runService, 5000);
}
});
} catch(ex) {
} catch (ex) {
console.log(ex);
}
}
}
};
runService();
console.log('Started running service');
console.log("Started running service");
var gracefulShutdown = function(code) {
console.log('Shutting down runner listener');
stopping = true;
if (listener) {
console.log('Sending SIGINT to runner listener to stop');
listener.kill('SIGINT');
// TODO wait for 30 seconds and send a SIGKILL
}
}
process.on('SIGINT', () => {
gracefulShutdown(0);
process.on("SIGINT", () => {
gracefulShutdown();
});
process.on('SIGTERM', () => {
gracefulShutdown(0);
process.on("SIGTERM", () => {
gracefulShutdown();
});
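The consecutive-failure counter above is controlled entirely through the GITHUB_ACTIONS_SERVICE_EXIT_AFTER_N_FAILURES environment variable. A minimal sketch of how it might be set when the service wrapper is launched (the runsvc.sh entry point is assumed from the standard runner layout):

# Exit the service wrapper after 5 consecutive listener failures instead of
# retrying forever; leaving the variable unset (or <= 0) keeps the old behavior.
export GITHUB_ACTIONS_SERVICE_EXIT_AFTER_N_FAILURES=5
./runsvc.sh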


@@ -23,5 +23,9 @@
<key>ACTIONS_RUNNER_SVC</key>
<string>1</string>
</dict>
<key>ProcessType</key>
<string>Interactive</string>
<key>SessionCreate</key>
<true/>
</dict>
</plist>
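With ProcessType set to Interactive and SessionCreate enabled, the agent is still managed through the normal LaunchAgent flow. A hedged sketch for reloading it by hand (the plist name depends on the configured runner and is a placeholder here):

# Reload the runner LaunchAgent so the new ProcessType/SessionCreate keys take effect.
plist="$HOME/Library/LaunchAgents/actions.runner.example-org.example-runner.plist"   # placeholder name
launchctl unload "$plist" 2>/dev/null || true
launchctl load "$plist"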


@@ -0,0 +1,115 @@
const https = require('https')
const fs = require('fs')
const http = require('http')
const hostname = process.env['HOSTNAME'] || ''
const port = process.env['PORT'] || ''
const path = process.env['PATH'] || ''
const pat = process.env['PAT'] || ''
const proxyHost = process.env['PROXYHOST'] || ''
const proxyPort = process.env['PROXYPORT'] || ''
const proxyUsername = process.env['PROXYUSERNAME'] || ''
const proxyPassword = process.env['PROXYPASSWORD'] || ''
process.env['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'
if (proxyHost === '') {
const options = {
hostname: hostname,
port: port,
path: path,
method: 'GET',
headers: {
'User-Agent': 'GitHubActionsRunnerCheck/1.0',
'Authorization': `token ${pat}`
},
}
const req = https.request(options, res => {
console.log(`statusCode: ${res.statusCode}`)
console.log(`headers: ${JSON.stringify(res.headers)}`)
let cert = res.socket.getPeerCertificate(true)
let certPEM = ''
let fingerprints = {}
while (cert != null && fingerprints[cert.fingerprint] != '1') {
fingerprints[cert.fingerprint] = '1'
certPEM = certPEM + '-----BEGIN CERTIFICATE-----\n'
let certEncoded = cert.raw.toString('base64')
for (let i = 0; i < certEncoded.length; i++) {
certPEM = certPEM + certEncoded[i]
if (i != certEncoded.length - 1 && (i + 1) % 64 == 0) {
certPEM = certPEM + '\n'
}
}
certPEM = certPEM + '\n-----END CERTIFICATE-----\n'
cert = cert.issuerCertificate
}
console.log(certPEM)
fs.writeFileSync('./download_ca_cert.pem', certPEM)
res.on('data', d => {
process.stdout.write(d)
})
})
req.on('error', error => {
console.error(error)
})
req.end()
}
else {
const auth = 'Basic ' + Buffer.from(proxyUsername + ':' + proxyPassword).toString('base64')
const options = {
host: proxyHost,
port: proxyPort,
method: 'CONNECT',
path: `${hostname}:${port}`,
}
if (proxyUsername != '' || proxyPassword != '') {
options.headers = {
'Proxy-Authorization': auth,
}
}
http.request(options).on('connect', (res, socket) => {
if (res.statusCode != 200) {
throw new Error(`Proxy returns code: ${res.statusCode}`)
}
https.get({
host: hostname,
port: port,
socket: socket,
agent: false,
path: '/',
headers: {
'User-Agent': 'GitHubActionsRunnerCheck/1.0',
'Authorization': `token ${pat}`
}
}, (res) => {
let cert = res.socket.getPeerCertificate(true)
let certPEM = ''
let fingerprints = {}
while (cert != null && fingerprints[cert.fingerprint] != '1') {
fingerprints[cert.fingerprint] = '1'
certPEM = certPEM + '-----BEGIN CERTIFICATE-----\n'
let certEncoded = cert.raw.toString('base64')
for (let i = 0; i < certEncoded.length; i++) {
certPEM = certPEM + certEncoded[i]
if (i != certEncoded.length - 1 && (i + 1) % 64 == 0) {
certPEM = certPEM + '\n'
}
}
certPEM = certPEM + '\n-----END CERTIFICATE-----\n'
cert = cert.issuerCertificate
}
console.log(certPEM)
fs.writeFileSync('./download_ca_cert.pem', certPEM)
console.log(`statusCode: ${res.statusCode}`)
console.log(`headers: ${JSON.stringify(res.headers)}`)
res.on('data', d => {
process.stdout.write(d)
})
})
}).on('error', (err) => {
console.error('error', err)
}).end()
}


@@ -0,0 +1,75 @@
const https = require('https')
const http = require('http')
const hostname = process.env['HOSTNAME'] || ''
const port = process.env['PORT'] || ''
const path = process.env['PATH'] || ''
const pat = process.env['PAT'] || ''
const proxyHost = process.env['PROXYHOST'] || ''
const proxyPort = process.env['PROXYPORT'] || ''
const proxyUsername = process.env['PROXYUSERNAME'] || ''
const proxyPassword = process.env['PROXYPASSWORD'] || ''
if (proxyHost === '') {
const options = {
hostname: hostname,
port: port,
path: path,
method: 'GET',
headers: {
'User-Agent': 'GitHubActionsRunnerCheck/1.0',
'Authorization': `token ${pat}`,
}
}
const req = https.request(options, res => {
console.log(`statusCode: ${res.statusCode}`)
console.log(`headers: ${JSON.stringify(res.headers)}`)
res.on('data', d => {
process.stdout.write(d)
})
})
req.on('error', error => {
console.error(error)
})
req.end()
}
else {
const proxyAuth = 'Basic ' + Buffer.from(proxyUsername + ':' + proxyPassword).toString('base64')
const options = {
hostname: proxyHost,
port: proxyPort,
method: 'CONNECT',
path: `${hostname}:${port}`
}
if (proxyUsername != '' || proxyPassword != '') {
options.headers = {
'Proxy-Authorization': proxyAuth,
}
}
http.request(options).on('connect', (res, socket) => {
if (res.statusCode != 200) {
throw new Error(`Proxy returns code: ${res.statusCode}`)
}
https.get({
host: hostname,
port: port,
socket: socket,
agent: false,
path: path,
headers: {
'User-Agent': 'GitHubActionsRunnerCheck/1.0',
'Authorization': `token ${pat}`,
}
}, (res) => {
console.log(`statusCode: ${res.statusCode}`)
console.log(`headers: ${JSON.stringify(res.headers)}`)
res.on('data', d => {
process.stdout.write(d)
})
})
}).on('error', (err) => {
console.error('error', err)
}).end()
}


@@ -1,6 +1,7 @@
#!/bin/bash
SVC_NAME="{{SvcNameVar}}"
SVC_NAME=${SVC_NAME// /_}
SVC_DESCRIPTION="{{SvcDescription}}"
user_id=`id -u`
@@ -16,7 +17,13 @@ RUNNER_ROOT=`pwd`
LAUNCH_PATH="${HOME}/Library/LaunchAgents"
PLIST_PATH="${LAUNCH_PATH}/${SVC_NAME}.plist"
TEMPLATE_PATH=./bin/actions.runner.plist.template
TEMPLATE_PATH=$GITHUB_ACTIONS_RUNNER_SERVICE_TEMPLATE
IS_CUSTOM_TEMPLATE=0
if [[ -z $TEMPLATE_PATH ]]; then
TEMPLATE_PATH=./bin/actions.runner.plist.template
else
IS_CUSTOM_TEMPLATE=1
fi
TEMP_PATH=./bin/actions.runner.plist.temp
CONFIG_PATH=.service
@@ -28,7 +35,11 @@ function failed()
}
if [ ! -f "${TEMPLATE_PATH}" ]; then
if [[ $IS_CUSTOM_TEMPLATE = 0 ]]; then
failed "Must run from runner root or install is corrupt"
else
failed "Service file at '$GITHUB_ACTIONS_RUNNER_SERVICE_TEMPLATE' using GITHUB_ACTIONS_RUNNER_SERVICE_TEMPLATE env variable is not found"
fi
fi
function install()
@@ -52,7 +63,7 @@ function install()
mkdir -p "${log_path}" || failed "failed to create ${log_path}"
echo Creating ${PLIST_PATH}
sed "s/{{User}}/${SUDO_USER:-$USER}/g; s/{{SvcName}}/$SVC_NAME/g; s@{{RunnerRoot}}@${RUNNER_ROOT}@g; s@{{UserHome}}@$HOME@g;" "${TEMPLATE_PATH}" > "${TEMP_PATH}" || failed "failed to create replacement temp file"
sed "s/{{User}}/${USER:-$SUDO_USER}/g; s/{{SvcName}}/$SVC_NAME/g; s@{{RunnerRoot}}@${RUNNER_ROOT}@g; s@{{UserHome}}@$HOME@g;" "${TEMPLATE_PATH}" > "${TEMP_PATH}" || failed "failed to create replacement temp file"
mv "${TEMP_PATH}" "${PLIST_PATH}" || failed "failed to copy plist"
# Since we started with sudo, runsvc.sh will be owned by root. Change this to current login user.


@@ -43,6 +43,32 @@ module.exports =
/************************************************************************/
/******/ ({
/***/ 82:
/***/ (function(__unusedmodule, exports) {
"use strict";
// We use any as a valid input type
/* eslint-disable @typescript-eslint/no-explicit-any */
Object.defineProperty(exports, "__esModule", { value: true });
/**
* Sanitizes an input into a string so it can be passed into issueCommand safely
* @param input input to sanitize into a string
*/
function toCommandValue(input) {
if (input === null || input === undefined) {
return '';
}
else if (typeof input === 'string' || input instanceof String) {
return input;
}
return JSON.stringify(input);
}
exports.toCommandValue = toCommandValue;
//# sourceMappingURL=utils.js.map
/***/ }),
/***/ 87:
/***/ (function(module) {
@@ -978,6 +1004,42 @@ function regExpEscape (s) {
}
/***/ }),
/***/ 102:
/***/ (function(__unusedmodule, exports, __webpack_require__) {
"use strict";
// For internal use, subject to change.
var __importStar = (this && this.__importStar) || function (mod) {
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];
result["default"] = mod;
return result;
};
Object.defineProperty(exports, "__esModule", { value: true });
// We use any as a valid input type
/* eslint-disable @typescript-eslint/no-explicit-any */
const fs = __importStar(__webpack_require__(747));
const os = __importStar(__webpack_require__(87));
const utils_1 = __webpack_require__(82);
function issueCommand(command, message) {
const filePath = process.env[`GITHUB_${command}`];
if (!filePath) {
throw new Error(`Unable to find environment variable for file command ${command}`);
}
if (!fs.existsSync(filePath)) {
throw new Error(`Missing file at path: ${filePath}`);
}
fs.appendFileSync(filePath, `${utils_1.toCommandValue(message)}${os.EOL}`, {
encoding: 'utf8'
});
}
exports.issueCommand = issueCommand;
//# sourceMappingURL=file-command.js.map
/***/ }),
/***/ 281:
@@ -1495,12 +1557,12 @@ var __importStar = (this && this.__importStar) || function (mod) {
return result;
};
Object.defineProperty(exports, "__esModule", { value: true });
const glob = __importStar(__webpack_require__(281));
const crypto = __importStar(__webpack_require__(417));
const fs = __importStar(__webpack_require__(747));
const glob = __importStar(__webpack_require__(281));
const path = __importStar(__webpack_require__(622));
const stream = __importStar(__webpack_require__(413));
const util = __importStar(__webpack_require__(669));
const path = __importStar(__webpack_require__(622));
function run() {
var e_1, _a;
return __awaiter(this, void 0, void 0, function* () {
@@ -1551,7 +1613,7 @@ function run() {
}
result.end();
if (hasMatch) {
console.log(`Find ${count} files to hash.`);
console.log(`Found ${count} files to hash.`);
console.error(`__OUTPUT__${result.digest('hex')}__OUTPUT__`);
}
else {
@@ -1559,7 +1621,15 @@ function run() {
}
});
}
run();
run()
.then(out => {
console.log(out);
process.exit(0);
})
.catch(err => {
console.error(err);
process.exit(1);
});
/***/ }),
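
The bundled hashFiles script above hashes each matched file, feeds the per-file digests into a combined SHA-256, and prints the final hex digest between __OUTPUT__ markers on stderr, which the runner scrapes from the output. A simplified, synchronous sketch of the same idea (not the exact bundled code, which streams files through the hash):

const crypto = require('crypto')
const fs = require('fs')
// Hash every file, then hash the concatenation of the per-file digests.
function hashFiles(paths) {
  const combined = crypto.createHash('sha256')
  for (const file of paths) {
    const digest = crypto.createHash('sha256').update(fs.readFileSync(file)).digest()
    combined.update(digest)
  }
  return combined.digest('hex')
}
// The runner reads the value back from stderr between the markers:
// console.error(`__OUTPUT__${hashFiles(['package-lock.json'])}__OUTPUT__`)
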
@@ -1687,17 +1757,25 @@ module.exports = require("crypto");
"use strict";
var __importStar = (this && this.__importStar) || function (mod) {
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];
result["default"] = mod;
return result;
};
Object.defineProperty(exports, "__esModule", { value: true });
const os = __webpack_require__(87);
const os = __importStar(__webpack_require__(87));
const utils_1 = __webpack_require__(82);
/**
* Commands
*
* Command Format:
* ##[name key=value;key=value]message
* ::name key=value,key=value::message
*
* Examples:
* ##[warning]This is the user warning message
* ##[set-secret name=mypassword]definitelyNotAPassword!
* ::warning::This is the message
* ::set-env name=MY_VAR::some value
*/
function issueCommand(command, properties, message) {
const cmd = new Command(command, properties, message);
@@ -1722,34 +1800,39 @@ class Command {
let cmdStr = CMD_STRING + this.command;
if (this.properties && Object.keys(this.properties).length > 0) {
cmdStr += ' ';
let first = true;
for (const key in this.properties) {
if (this.properties.hasOwnProperty(key)) {
const val = this.properties[key];
if (val) {
// safely append the val - avoid blowing up when attempting to
// call .replace() if message is not a string for some reason
cmdStr += `${key}=${escape(`${val || ''}`)},`;
if (first) {
first = false;
}
else {
cmdStr += ',';
}
cmdStr += `${key}=${escapeProperty(val)}`;
}
}
}
}
cmdStr += CMD_STRING;
// safely append the message - avoid blowing up when attempting to
// call .replace() if message is not a string for some reason
const message = `${this.message || ''}`;
cmdStr += escapeData(message);
cmdStr += `${CMD_STRING}${escapeData(this.message)}`;
return cmdStr;
}
}
function escapeData(s) {
return s.replace(/\r/g, '%0D').replace(/\n/g, '%0A');
return utils_1.toCommandValue(s)
.replace(/%/g, '%25')
.replace(/\r/g, '%0D')
.replace(/\n/g, '%0A');
}
function escape(s) {
return s
function escapeProperty(s) {
return utils_1.toCommandValue(s)
.replace(/%/g, '%25')
.replace(/\r/g, '%0D')
.replace(/\n/g, '%0A')
.replace(/]/g, '%5D')
.replace(/;/g, '%3B');
.replace(/:/g, '%3A')
.replace(/,/g, '%2C');
}
//# sourceMappingURL=command.js.map
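
The escapeData/escapeProperty changes above percent-encode `%` first (so existing encodings survive a round trip), then CR/LF, and, for properties, `:` and `,` because those delimit the `::name key=value,key=value::message` syntax. A standalone illustration of the resulting output (these mirrors are for illustration only, not the toolkit module itself):

function escapeData(s) {
  return String(s).replace(/%/g, '%25').replace(/\r/g, '%0D').replace(/\n/g, '%0A')
}
function escapeProperty(s) {
  return String(s)
    .replace(/%/g, '%25').replace(/\r/g, '%0D').replace(/\n/g, '%0A')
    .replace(/:/g, '%3A').replace(/,/g, '%2C')
}
const cmd = `::warning file=${escapeProperty('src/a,b.js')}::${escapeData('50% done\nnext step')}`
console.log(cmd)
// -> ::warning file=src/a%2Cb.js::50%25 done%0Anext step
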
@@ -1769,10 +1852,19 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __importStar = (this && this.__importStar) || function (mod) {
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];
result["default"] = mod;
return result;
};
Object.defineProperty(exports, "__esModule", { value: true });
const command_1 = __webpack_require__(431);
const os = __webpack_require__(87);
const path = __webpack_require__(622);
const file_command_1 = __webpack_require__(102);
const utils_1 = __webpack_require__(82);
const os = __importStar(__webpack_require__(87));
const path = __importStar(__webpack_require__(622));
/**
* The code to exit an action
*/
@@ -1793,11 +1885,21 @@ var ExitCode;
/**
* Sets env variable for this action and future actions in the job
* @param name the name of the variable to set
* @param val the value of the variable
* @param val the value of the variable. Non-string values will be converted to a string via JSON.stringify
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function exportVariable(name, val) {
process.env[name] = val;
command_1.issueCommand('set-env', { name }, val);
const convertedVal = utils_1.toCommandValue(val);
process.env[name] = convertedVal;
const filePath = process.env['GITHUB_ENV'] || '';
if (filePath) {
const delimiter = '_GitHubActionsFileCommandDelimeter_';
const commandValue = `${name}<<${delimiter}${os.EOL}${convertedVal}${os.EOL}${delimiter}`;
file_command_1.issueCommand('ENV', commandValue);
}
else {
command_1.issueCommand('set-env', { name }, convertedVal);
}
}
exports.exportVariable = exportVariable;
/**
@@ -1813,7 +1915,13 @@ exports.setSecret = setSecret;
* @param inputPath
*/
function addPath(inputPath) {
const filePath = process.env['GITHUB_PATH'] || '';
if (filePath) {
file_command_1.issueCommand('PATH', inputPath);
}
else {
command_1.issueCommand('add-path', {}, inputPath);
}
process.env['PATH'] = `${inputPath}${path.delimiter}${process.env['PATH']}`;
}
exports.addPath = addPath;
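
With the file-command path above, exportVariable and addPath no longer rely only on the deprecated set-env/add-path stdout commands: when GITHUB_ENV or GITHUB_PATH is set they append to those files instead, using a heredoc-style delimiter block for multi-line values. A hedged sketch of the file formats being written (the delimiter and variable names below are examples, not the toolkit's constants, and both environment variables are assumed to point at writable files as they do inside a job):

const fs = require('fs')
const os = require('os')
// Append a (possibly multi-line) variable to the GITHUB_ENV file using a
// delimiter block, and a directory to the GITHUB_PATH file.
function appendEnv(name, value) {
  const delimiter = `EOF_${Date.now()}` // any marker not contained in the value
  const block = `${name}<<${delimiter}${os.EOL}${value}${os.EOL}${delimiter}${os.EOL}`
  fs.appendFileSync(process.env.GITHUB_ENV, block, { encoding: 'utf8' })
}
function appendPath(dir) {
  fs.appendFileSync(process.env.GITHUB_PATH, `${dir}${os.EOL}`, { encoding: 'utf8' })
}
// appendEnv('NOTES', 'line one\nline two'); appendPath('/opt/mytool/bin')
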
@@ -1836,12 +1944,22 @@ exports.getInput = getInput;
* Sets the value of an output.
*
* @param name name of the output to set
* @param value value to store
* @param value value to store. Non-string values will be converted to a string via JSON.stringify
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function setOutput(name, value) {
command_1.issueCommand('set-output', { name }, value);
}
exports.setOutput = setOutput;
/**
* Enables or disables the echoing of commands into stdout for the rest of the step.
* Echoing is disabled by default if ACTIONS_STEP_DEBUG is not set.
*
*/
function setCommandEcho(enabled) {
command_1.issue('echo', enabled ? 'on' : 'off');
}
exports.setCommandEcho = setCommandEcho;
//-----------------------------------------------------------------------
// Results
//-----------------------------------------------------------------------
@@ -1858,6 +1976,13 @@ exports.setFailed = setFailed;
//-----------------------------------------------------------------------
// Logging Commands
//-----------------------------------------------------------------------
/**
* Gets whether Actions Step Debug is on or not
*/
function isDebug() {
return process.env['RUNNER_DEBUG'] === '1';
}
exports.isDebug = isDebug;
/**
* Writes debug message to user log
* @param message debug message
@@ -1868,18 +1993,18 @@ function debug(message) {
exports.debug = debug;
/**
* Adds an error issue
* @param message error issue message
* @param message error issue message. Errors will be converted to string via toString()
*/
function error(message) {
command_1.issue('error', message);
command_1.issue('error', message instanceof Error ? message.toString() : message);
}
exports.error = error;
/**
* Adds an warning issue
* @param message warning issue message
* @param message warning issue message. Errors will be converted to string via toString()
*/
function warning(message) {
command_1.issue('warning', message);
command_1.issue('warning', message instanceof Error ? message.toString() : message);
}
exports.warning = warning;
/**
@@ -1937,8 +2062,9 @@ exports.group = group;
* Saves state for current action, the state can only be retrieved by this action's post job execution.
*
* @param name name of the state to store
* @param value value to store
* @param value value to store. Non-string values will be converted to a string via JSON.stringify
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function saveState(name, value) {
command_1.issueCommand('save-state', { name }, value);
}


@@ -9,7 +9,7 @@ fi
# Determine OS type
# Debian based OS (Debian, Ubuntu, Linux Mint) has /etc/debian_version
# Fedora based OS (Fedora, Redhat, Centos, Oracle Linux 7) has /etc/redhat-release
# Fedora based OS (Fedora, Red Hat Enterprise Linux, CentOS, Oracle Linux 7) has /etc/redhat-release
# SUSE based OS (OpenSUSE, SUSE Enterprise) has ID_LIKE=suse in /etc/os-release
function print_errormessage()
@@ -49,79 +49,83 @@ then
cat /etc/debian_version
echo "------------------------------"
# prefer apt over apt-get
command -v apt
if [ $? -eq 0 ]
then
apt update && apt install -y liblttng-ust0 libkrb5-3 zlib1g
if [ $? -ne 0 ]
then
echo "'apt' failed with exit code '$?'"
print_errormessage
exit 1
fi
# libssl version prefer: libssl1.1 -> libssl1.0.2 -> libssl1.0.0
apt install -y libssl1.1$ || apt install -y libssl1.0.2$ || apt install -y libssl1.0.0$
if [ $? -ne 0 ]
then
echo "'apt' failed with exit code '$?'"
print_errormessage
exit 1
fi
# libicu version prefer: libicu63 -> libicu60 -> libicu57 -> libicu55 -> libicu52
apt install -y libicu63 || apt install -y libicu60 || apt install -y libicu57 || apt install -y libicu55 || apt install -y libicu52
if [ $? -ne 0 ]
then
echo "'apt' failed with exit code '$?'"
print_errormessage
exit 1
fi
else
# prefer apt-get over apt
command -v apt-get
if [ $? -eq 0 ]
then
apt-get update && apt-get install -y liblttng-ust0 libkrb5-3 zlib1g
if [ $? -ne 0 ]
then
echo "'apt-get' failed with exit code '$?'"
print_errormessage
exit 1
fi
# libssl version prefer: libssl1.1 -> libssl1.0.2 -> libssl1.0.0
apt-get install -y libssl1.1$ || apt-get install -y libssl1.0.2$ || apt install -y libssl1.0.0$
if [ $? -ne 0 ]
then
echo "'apt-get' failed with exit code '$?'"
print_errormessage
exit 1
fi
# libicu version prefer: libicu63 -> libicu60 -> libicu57 -> libicu55 -> libicu52
apt-get install -y libicu63 || apt-get install -y libicu60 || apt install -y libicu57 || apt install -y libicu55 || apt install -y libicu52
if [ $? -ne 0 ]
then
echo "'apt-get' failed with exit code '$?'"
print_errormessage
exit 1
fi
apt_get=apt-get
else
echo "Can not find 'apt' or 'apt-get'"
command -v apt
if [ $? -eq 0 ]
then
apt_get=apt
else
echo "Found neither 'apt-get' nor 'apt'"
print_errormessage
exit 1
fi
fi
$apt_get update && $apt_get install -y libkrb5-3 zlib1g
if [ $? -ne 0 ]
then
echo "'$apt_get' failed with exit code '$?'"
print_errormessage
exit 1
fi
apt_get_with_fallbacks() {
$apt_get install -y $1
fail=$?
if [ $fail -eq 0 ]
then
if [ "${1#"${1%?}"}" = '$' ]; then
dpkg -l "${1%?}" > /dev/null 2> /dev/null
fail=$?
fi
fi
if [ $fail -ne 0 ]
then
shift
if [ -n "$1" ]
then
apt_get_with_fallbacks "$@"
fi
fi
}
apt_get_with_fallbacks liblttng-ust1 liblttng-ust0
if [ $? -ne 0 ]
then
echo "'$apt_get' failed with exit code '$?'"
print_errormessage
exit 1
fi
apt_get_with_fallbacks libssl1.1$ libssl1.0.2$ libssl1.0.0$
if [ $? -ne 0 ]
then
echo "'$apt_get' failed with exit code '$?'"
print_errormessage
exit 1
fi
apt_get_with_fallbacks libicu72 libicu71 libicu70 libicu69 libicu68 libicu67 libicu66 libicu65 libicu63 libicu60 libicu57 libicu55 libicu52
if [ $? -ne 0 ]
then
echo "'$apt_get' failed with exit code '$?'"
print_errormessage
exit 1
fi
elif [ -e /etc/redhat-release ]
then
echo "The current OS is Fedora based"
echo "--------Redhat Version--------"
echo "--Fedora/RHEL/CentOS Version--"
cat /etc/redhat-release
echo "------------------------------"
# use dnf on fedora
# use yum on centos and redhat
# use yum on centos and rhel
if [ -e /etc/fedora-release ]
then
command -v dnf
@@ -191,7 +195,7 @@ then
redhatRelease=$(</etc/redhat-release)
if [[ $redhatRelease == "CentOS release 6."* || $redhatRelease == "Red Hat Enterprise Linux Server release 6."* ]]
then
echo "The current OS is Red Hat Enterprise Linux 6 or Centos 6"
echo "The current OS is Red Hat Enterprise Linux 6 or CentOS 6"
# Install known dependencies, as a best effort.
# The remaining dependencies are covered by the GitHub doc that will be shown by `print_rhel6message`
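
The new apt_get_with_fallbacks helper above tries each candidate package in order until one installs; a trailing `$` is treated by apt as a regex anchor for an exact name, so the helper double-checks with dpkg -l that the package really landed. A rough Node rendering of the same fallback idea, purely for illustration (package names below are examples):

const { spawnSync } = require('child_process')
// Try candidate packages in order and stop at the first successful install.
function installFirstAvailable(aptGet, candidates) {
  for (const pkg of candidates) {
    const { status } = spawnSync(aptGet, ['install', '-y', pkg], { stdio: 'inherit' })
    if (status === 0) {
      return pkg
    }
  }
  return null
}
// installFirstAvailable('apt-get', ['libicu72', 'libicu71', 'libicu70'])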


@@ -10,10 +10,11 @@ if [ -f ".path" ]; then
echo ".path=${PATH}"
fi
# insert anything to setup env when running as a service
nodever=${GITHUB_ACTIONS_RUNNER_FORCED_NODE_VERSION:-node16}
# insert anything to setup env when running as a service
# run the host process which keep the listener alive
./externals/node12/bin/node ./bin/RunnerService.js &
./externals/$nodever/bin/node ./bin/RunnerService.js &
PID=$!
wait $PID
trap - TERM INT


@@ -1,6 +1,7 @@
#!/bin/bash
SVC_NAME="{{SvcNameVar}}"
SVC_NAME=${SVC_NAME// /_}
SVC_DESCRIPTION="{{SvcDescription}}"
SVC_CMD=$1
@@ -9,7 +10,13 @@ arg_2=${2}
RUNNER_ROOT=`pwd`
UNIT_PATH=/etc/systemd/system/${SVC_NAME}
TEMPLATE_PATH=./bin/actions.runner.service.template
TEMPLATE_PATH=$GITHUB_ACTIONS_RUNNER_SERVICE_TEMPLATE
IS_CUSTOM_TEMPLATE=0
if [[ -z $TEMPLATE_PATH ]]; then
TEMPLATE_PATH=./bin/actions.runner.service.template
else
IS_CUSTOM_TEMPLATE=1
fi
TEMP_PATH=./bin/actions.runner.service.temp
CONFIG_PATH=.service
@@ -30,7 +37,11 @@ function failed()
}
if [ ! -f "${TEMPLATE_PATH}" ]; then
if [[ $IS_CUSTOM_TEMPLATE = 0 ]]; then
failed "Must run from runner root or install is corrupt"
else
failed "Service file at '$GITHUB_ACTIONS_RUNNER_SERVICE_TEMPLATE' using GITHUB_ACTIONS_RUNNER_SERVICE_TEMPLATE env variable is not found"
fi
fi
#check if we run as root
@@ -63,8 +74,21 @@ function install()
sed "s/{{User}}/${run_as_user}/g; s/{{Description}}/$(echo ${SVC_DESCRIPTION} | sed -e 's/[\/&]/\\&/g')/g; s/{{RunnerRoot}}/$(echo ${RUNNER_ROOT} | sed -e 's/[\/&]/\\&/g')/g;" "${TEMPLATE_PATH}" > "${TEMP_PATH}" || failed "failed to create replacement temp file"
mv "${TEMP_PATH}" "${UNIT_PATH}" || failed "failed to copy unit file"
# Recent Fedora based Linux (CentOS/Redhat) has SELinux enabled by default
# We need to restore security context on the unit file we added otherwise SystemD have no access to it.
command -v getenforce > /dev/null
if [ $? -eq 0 ]
then
selinuxEnabled=$(getenforce)
if [[ $selinuxEnabled == "Enforcing" ]]
then
# SELinux is enabled, we will need to Restore SELinux Context for the service file
restorecon -r -v "${UNIT_PATH}" || failed "failed to restore SELinux context on ${UNIT_PATH}"
fi
fi
# unit file should not be executable and world writable
chmod 664 ${UNIT_PATH} || failed "failed to set permissions on ${UNIT_PATH}"
chmod 664 "${UNIT_PATH}" || failed "failed to set permissions on ${UNIT_PATH}"
systemctl daemon-reload || failed "failed to reload daemons"
# Since we started with sudo, runsvc.sh will be owned by root. Change this to current login user.
@@ -92,25 +116,37 @@ function stop()
function uninstall()
{
if service_exists; then
stop
systemctl disable ${SVC_NAME} || failed "failed to disable ${SVC_NAME}"
rm "${UNIT_PATH}" || failed "failed to delete ${UNIT_PATH}"
else
echo "Service ${SVC_NAME} is not installed"
fi
if [ -f "${CONFIG_PATH}" ]; then
rm "${CONFIG_PATH}" || failed "failed to delete ${CONFIG_PATH}"
fi
systemctl daemon-reload || failed "failed to reload daemons"
}
function service_exists() {
if [ -f "${UNIT_PATH}" ]; then
return 0
else
return 1
fi
}
function status()
{
if [ -f "${UNIT_PATH}" ]; then
if service_exists; then
echo
echo "${UNIT_PATH}"
else
echo
echo "not installed"
echo
return
exit 1
fi
systemctl --no-pager status ${SVC_NAME}

src/Misc/layoutbin/update.sh.template Normal file → Executable file

@@ -18,6 +18,8 @@ downloadrunnerversion=_DOWNLOAD_RUNNER_VERSION_
logfile="_UPDATE_LOG_"
restartinteractiverunner=_RESTART_INTERACTIVE_RUNNER_
telemetryfile="$rootfolder/_diag/.telemetry"
# log user who run the script
date "+[%F %T-%4N] --------whoami--------" >> "$logfile" 2>&1
whoami >> "$logfile" 2>&1
@@ -28,13 +30,13 @@ date "+[%F %T-%4N] Waiting for $runnerprocessname ($runnerpid) to complete" >> "
while [ -e /proc/$runnerpid ]
do
date "+[%F %T-%4N] Process $runnerpid still running" >> "$logfile" 2>&1
ping -c 2 127.0.0.1 >nul
"$rootfolder"/safe_sleep.sh 2
done
date "+[%F %T-%4N] Process $runnerpid finished running" >> "$logfile" 2>&1
# start re-organize folders
date "+[%F %T-%4N] Sleep 1 more second to make sure process exited" >> "$logfile" 2>&1
ping -c 2 127.0.0.1 >nul
"$rootfolder"/safe_sleep.sh 1
# the folder structure under runner root will be
# ./bin -> bin.2.100.0 (junction folder)
@@ -118,6 +120,64 @@ then
exit 1
fi
# fix upgrade issue with macOS when running as a service
attemptedtargetedfix=0
currentplatform=$(uname | awk '{print tolower($0)}')
if [[ "$currentplatform" == 'darwin' && restartinteractiverunner -eq 0 ]]; then
# We needed a fix for https://github.com/actions/runner/issues/743
# We will recreate the ./externals/nodeXY/bin/node of the past runner version that launched the runnerlistener service
# Otherwise mac gatekeeper kills the processes we spawn on creation as we are running a process with no backing file
# We need the pid for the nodejs loop, get that here, its the parent of the runner C# pid
# assumption here is only one process is invoking rootfolder/runsvc.sh
procgroup=$(ps x -o pgid,command | grep "$rootfolder/runsvc.sh" | grep -v grep | awk '{print $1}')
if [[ $? -eq 0 && -n "$procgroup" ]]
then
# inspect the open file handles to find the node process
# we can't actually inspect the process using ps because it uses relative paths and doesn't follow symlinks
nodever="node16"
path=$(lsof -a -g "$procgroup" -F n | grep $nodever/bin/node | grep externals | tail -1 | cut -c2-)
if [[ $? -ne 0 || -z "$path" ]] # Fallback if RunnerService.js was started with node12
then
nodever="node12"
path=$(lsof -a -g "$procgroup" -F n | grep $nodever/bin/node | grep externals | tail -1 | cut -c2-)
fi
if [[ $? -eq 0 && -n "$path" ]]
then
# trim the last 5 characters of the path '/node'
trimmedpath=$(dirname "$path")
if [[ $? -eq 0 && -n "$trimmedpath" ]]
then
attemptedtargetedfix=1
# Create the path if it does not exist
if [[ ! -e "$path" ]]
then
date "+[%F %T-%4N] Creating fallback node at path $path" >> "$logfile" 2>&1
mkdir -p "$trimmedpath"
cp "$rootfolder/externals/$nodever/bin/node" "$path"
else
date "+[%F %T-%4N] Path for fallback node exists, skipping creating $path" >> "$logfile" 2>&1
fi
else
date "+[%F %T-%4N] DarwinRunnerUpgrade: Failed to trim runner path. TrimmedPath: $trimmedpath, path: $path, pgid: $procgroup, root: $rootfolder" >> "$logfile" 2>&1
date "+[%F %T-%4N] DarwinRunnerUpgrade: Failed to trim runner path. TrimmedPath: $trimmedpath, path: $path, pgid: $procgroup, root: $rootfolder" >> "$telemetryfile" 2>&1
fi
else
date "+[%F %T-%4N] DarwinRunnerUpgrade: Failed to find runner path. Path: $path, pgid: $procgroup, root: $rootfolder" >> "$logfile" 2>&1
date "+[%F %T-%4N] DarwinRunnerUpgrade: Failed to find runner path. Path: $path, pgid: $procgroup, root: $rootfolder" >> "$telemetryfile" 2>&1
fi
else
runproc=$(ps x -o pgid,command | grep "run.sh" | grep -v grep | awk '{print $1}')
if [[ $? -eq 0 && -n "$runproc" ]]
then
date "+[%F %T-%4N] Running as ephemeral using run.sh, no need to recreate node folder" >> "$logfile" 2>&1
else
date "+[%F %T-%4N] DarwinRunnerUpgrade: Failed to find runner pgid. pgid: $procgroup, root: $rootfolder" >> "$logfile" 2>&1
date "+[%F %T-%4N] DarwinRunnerUpgrade: Failed to find runner pgid. pgid: $procgroup, root: $rootfolder" >> "$telemetryfile" 2>&1
fi
fi
fi
date "+[%F %T-%4N] Update succeed" >> "$logfile"
# rename the update log file with %logfile%.succeed/.failed/succeedneedrestart


@@ -8,7 +8,7 @@ if [ $user_id -eq 0 -a -z "$RUNNER_ALLOW_RUNASROOT" ]; then
exit 1
fi
# Check dotnet core 3.0 dependencies for Linux
# Check dotnet Core 6.0 dependencies for Linux
if [[ (`uname` == "Linux") ]]
then
command -v ldd > /dev/null
@@ -18,24 +18,26 @@ then
exit 1
fi
message="Execute sudo ./bin/installdependencies.sh to install any missing Dotnet Core 6.0 dependencies."
ldd ./bin/libcoreclr.so | grep 'not found'
if [ $? -eq 0 ]; then
echo "Dependencies is missing for Dotnet Core 3.0"
echo "Execute ./bin/installdependencies.sh to install any missing Dotnet Core 3.0 dependencies."
echo "Dependencies is missing for Dotnet Core 6.0"
echo $message
exit 1
fi
ldd ./bin/System.Security.Cryptography.Native.OpenSsl.so | grep 'not found'
ldd ./bin/libSystem.Security.Cryptography.Native.OpenSsl.so | grep 'not found'
if [ $? -eq 0 ]; then
echo "Dependencies is missing for Dotnet Core 3.0"
echo "Execute ./bin/installdependencies.sh to install any missing Dotnet Core 3.0 dependencies."
echo "Dependencies is missing for Dotnet Core 6.0"
echo $message
exit 1
fi
ldd ./bin/System.IO.Compression.Native.so | grep 'not found'
ldd ./bin/libSystem.IO.Compression.Native.so | grep 'not found'
if [ $? -eq 0 ]; then
echo "Dependencies is missing for Dotnet Core 3.0"
echo "Execute ./bin/installdependencies.sh to install any missing Dotnet Core 3.0 dependencies."
echo "Dependencies is missing for Dotnet Core 6.0"
echo $message
exit 1
fi
@@ -50,10 +52,10 @@ then
fi
libpath=${LD_LIBRARY_PATH:-}
$LDCONFIG_COMMAND -NXv ${libpath//:/} 2>&1 | grep libicu >/dev/null 2>&1
$LDCONFIG_COMMAND -NXv ${libpath//:/ } 2>&1 | grep libicu >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "Libicu's dependencies is missing for Dotnet Core 3.0"
echo "Execute ./bin/installdependencies.sh to install any missing Dotnet Core 3.0 dependencies."
echo "Libicu's dependencies is missing for Dotnet Core 6.0"
echo $message
exit 1
fi
fi
@@ -67,7 +69,7 @@ while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symli
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
cd $DIR
cd "$DIR"
source ./env.sh


@@ -6,6 +6,7 @@ varCheckList=(
'ANT_HOME'
'M2_HOME'
'ANDROID_HOME'
'ANDROID_SDK_ROOT'
'GRADLE_HOME'
'NVM_BIN'
'NVM_PATH'


@@ -0,0 +1,39 @@
@echo off
"%~dp0\bin\Runner.Listener.exe" run %*
rem using `if %ERRORLEVEL% EQU N` instead of `if ERRORLEVEL N`
rem `if ERRORLEVEL N` means: error level is N or MORE
if %ERRORLEVEL% EQU 0 (
echo "Runner listener exit with 0 return code, stop the service, no retry needed."
exit /b 0
)
if %ERRORLEVEL% EQU 1 (
echo "Runner listener exit with terminated error, stop the service, no retry needed."
exit /b 0
)
if %ERRORLEVEL% EQU 2 (
echo "Runner listener exit with retryable error, re-launch runner in 5 seconds."
ping 127.0.0.1 -n 6 -w 1000 >NUL
exit /b 1
)
if %ERRORLEVEL% EQU 3 (
rem Sleep 5 seconds to wait for the runner update process finish
echo "Runner listener exit because of updating, re-launch runner in 5 seconds"
ping 127.0.0.1 -n 6 -w 1000 >NUL
exit /b 1
)
if %ERRORLEVEL% EQU 4 (
rem Sleep 5 seconds to wait for the ephemeral runner update process finish
echo "Runner listener exit because of updating, re-launch ephemeral runner in 5 seconds"
ping 127.0.0.1 -n 6 -w 1000 >NUL
exit /b 1
)
echo "Exiting after unknown error code: %ERRORLEVEL%"
exit /b 0


@@ -0,0 +1,46 @@
#!/bin/bash
# Validate not sudo
user_id=`id -u`
if [ $user_id -eq 0 -a -z "$RUNNER_ALLOW_RUNASROOT" ]; then
echo "Must not run interactively with sudo"
exit 1
fi
# Run
shopt -s nocasematch
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
"$DIR"/bin/Runner.Listener run $*
returnCode=$?
if [[ $returnCode == 0 ]]; then
echo "Runner listener exit with 0 return code, stop the service, no retry needed."
exit 0
elif [[ $returnCode == 1 ]]; then
echo "Runner listener exit with terminated error, stop the service, no retry needed."
exit 0
elif [[ $returnCode == 2 ]]; then
echo "Runner listener exit with retryable error, re-launch runner in 5 seconds."
"$DIR"/safe_sleep.sh 5
exit 2
elif [[ $returnCode == 3 ]]; then
# Sleep 5 seconds to wait for the runner update process finish
echo "Runner listener exit because of updating, re-launch runner in 5 seconds"
"$DIR"/safe_sleep.sh 5
exit 2
elif [[ $returnCode == 4 ]]; then
# Sleep 5 seconds to wait for the ephemeral runner update process finish
echo "Runner listener exit because of updating, re-launch ephemeral runner in 5 seconds"
"$DIR"/safe_sleep.sh 5
exit 2
else
echo "Exiting with unknown error code: ${returnCode}"
exit 0
fi


@@ -13,21 +13,19 @@ if defined VERBOSE_ARG (
rem Unblock files in the root of the layout folder. E.g. .cmd files.
powershell.exe -NoLogo -Sta -NoProfile -NonInteractive -ExecutionPolicy Unrestricted -Command "$VerbosePreference = %VERBOSE_ARG% ; Get-ChildItem -LiteralPath '%~dp0' | ForEach-Object { Write-Verbose ('Unblock: {0}' -f $_.FullName) ; $_ } | Unblock-File | Out-Null"
if /i "%~1" equ "localRun" (
rem ********************************************************************************
rem Local run.
rem ********************************************************************************
"%~dp0bin\Runner.Listener.exe" %*
) else (
rem ********************************************************************************
rem Run.
rem ********************************************************************************
"%~dp0bin\Runner.Listener.exe" run %*
rem Return code 4 means the run once runner received an update message.
rem Sleep 5 seconds to wait for the update process finish and run the runner again.
if ERRORLEVEL 4 (
timeout /t 5 /nobreak > NUL
"%~dp0bin\Runner.Listener.exe" run %*
)
rem ********************************************************************************
rem Run.
rem ********************************************************************************
:launch_helper
copy "%~dp0run-helper.cmd.template" "%~dp0run-helper.cmd" /Y
call "%~dp0run-helper.cmd" %*
if %ERRORLEVEL% EQU 1 (
echo "Restarting runner..."
goto :launch_helper
) else (
echo "Exiting runner..."
exit /b 0
)


@@ -1,12 +1,5 @@
#!/bin/bash
# Validate not sudo
user_id=`id -u`
if [ $user_id -eq 0 -a -z "$RUNNER_ALLOW_RUNASROOT" ]; then
echo "Must not run interactively with sudo"
exit 1
fi
# Change directory to the script root directory
# https://stackoverflow.com/questions/59895/getting-the-source-directory-of-a-bash-script-from-within
SOURCE="${BASH_SOURCE[0]}"
@@ -16,36 +9,16 @@ while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symli
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
# Do not "cd $DIR". For localRun, the current directory is expected to be the repo location on disk.
# Run
shopt -s nocasematch
if [[ "$1" == "localRun" ]]; then
"$DIR"/bin/Runner.Listener $*
else
"$DIR"/bin/Runner.Listener run $*
# Return code 4 means the run once runner received an update message.
# Sleep 5 seconds to wait for the update process finish and run the runner again.
cp -f "$DIR"/run-helper.sh.template "$DIR"/run-helper.sh
# run the helper process which keep the listener alive
while :;
do
"$DIR"/run-helper.sh $*
returnCode=$?
if [[ $returnCode == 4 ]]; then
if [ ! -x "$(command -v sleep)" ]; then
if [ ! -x "$(command -v ping)" ]; then
COUNT="0"
while [[ $COUNT != 5000 ]]; do
echo "SLEEP" >nul
COUNT=$[$COUNT+1]
done
if [[ $returnCode -eq 2 ]]; then
echo "Restarting runner..."
else
ping -n 5 127.0.0.1 >nul
echo "Exiting runner..."
exit 0
fi
else
sleep 5 >nul
fi
"$DIR"/bin/Runner.Listener run $*
else
exit $returnCode
fi
fi
done
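
Taken together, run-helper and the run.sh/run.cmd loops above implement a simple relaunch protocol: exit codes 0 and 1 stop the runner, 2 means a retryable error, and 3/4 mean the runner is updating itself, so the helper sleeps briefly and exits 2, which makes the outer loop start a fresh copy of the helper. A minimal Node sketch of that supervision loop (names and paths are illustrative, not runner code):

const { spawnSync } = require('child_process')
// Relaunch the listener until it reports a non-retryable exit code.
function supervise(listenerPath, args) {
  for (;;) {
    const { status } = spawnSync(listenerPath, ['run', ...args], { stdio: 'inherit' })
    if (status === 0 || status === 1) {
      return status // clean stop or terminated: no retry
    }
    // 2 = retryable error, 3/4 = self-update in progress: wait, then relaunch.
    Atomics.wait(new Int32Array(new SharedArrayBuffer(4)), 0, 0, 5000)
  }
}
// supervise('./bin/Runner.Listener', [])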


@@ -0,0 +1,6 @@
#!/bin/bash
SECONDS=0
while [[ $SECONDS != $1 ]]; do
:
done

src/Misc/runnercoreassets Normal file

@@ -0,0 +1,57 @@
actions.runner.plist.template
actions.runner.service.template
checkScripts/downloadCert.js
checkScripts/makeWebRequest.js
darwin.svc.sh.template
hashFiles/index.js
installdependencies.sh
macos-run-invoker.js
Microsoft.IdentityModel.Logging.dll
Microsoft.IdentityModel.Tokens.dll
Minimatch.dll
Newtonsoft.Json.Bson.dll
Newtonsoft.Json.dll
Runner.Common.deps.json
Runner.Common.dll
Runner.Common.pdb
Runner.Listener
Runner.Listener.deps.json
Runner.Listener.dll
Runner.Listener.exe
Runner.Listener.pdb
Runner.Listener.runtimeconfig.json
Runner.PluginHost
Runner.PluginHost.deps.json
Runner.PluginHost.dll
Runner.PluginHost.exe
Runner.PluginHost.pdb
Runner.PluginHost.runtimeconfig.json
Runner.Plugins.deps.json
Runner.Plugins.dll
Runner.Plugins.pdb
Runner.Sdk.deps.json
Runner.Sdk.dll
Runner.Sdk.pdb
Runner.Worker
Runner.Worker.deps.json
Runner.Worker.dll
Runner.Worker.exe
Runner.Worker.pdb
Runner.Worker.runtimeconfig.json
RunnerService.exe
RunnerService.exe.config
RunnerService.js
RunnerService.pdb
runsvc.sh
Sdk.deps.json
Sdk.dll
Sdk.pdb
System.IdentityModel.Tokens.Jwt.dll
System.Net.Http.Formatting.dll
System.Security.Cryptography.Pkcs.dll
System.Security.Cryptography.ProtectedData.dll
System.ServiceProcess.ServiceController.dll
systemd.svc.sh.template
update.cmd.template
update.sh.template
YamlDotNet.dll


@@ -0,0 +1,264 @@
api-ms-win-core-console-l1-1-0.dll
api-ms-win-core-console-l1-2-0.dll
api-ms-win-core-datetime-l1-1-0.dll
api-ms-win-core-debug-l1-1-0.dll
api-ms-win-core-errorhandling-l1-1-0.dll
api-ms-win-core-fibers-l1-1-0.dll
api-ms-win-core-file-l1-1-0.dll
api-ms-win-core-file-l1-2-0.dll
api-ms-win-core-file-l2-1-0.dll
api-ms-win-core-handle-l1-1-0.dll
api-ms-win-core-heap-l1-1-0.dll
api-ms-win-core-interlocked-l1-1-0.dll
api-ms-win-core-libraryloader-l1-1-0.dll
api-ms-win-core-localization-l1-2-0.dll
api-ms-win-core-memory-l1-1-0.dll
api-ms-win-core-namedpipe-l1-1-0.dll
api-ms-win-core-processenvironment-l1-1-0.dll
api-ms-win-core-processthreads-l1-1-0.dll
api-ms-win-core-processthreads-l1-1-1.dll
api-ms-win-core-profile-l1-1-0.dll
api-ms-win-core-rtlsupport-l1-1-0.dll
api-ms-win-core-string-l1-1-0.dll
api-ms-win-core-synch-l1-1-0.dll
api-ms-win-core-synch-l1-2-0.dll
api-ms-win-core-sysinfo-l1-1-0.dll
api-ms-win-core-timezone-l1-1-0.dll
api-ms-win-core-util-l1-1-0.dll
api-ms-win-crt-conio-l1-1-0.dll
api-ms-win-crt-convert-l1-1-0.dll
api-ms-win-crt-environment-l1-1-0.dll
api-ms-win-crt-filesystem-l1-1-0.dll
api-ms-win-crt-heap-l1-1-0.dll
api-ms-win-crt-locale-l1-1-0.dll
api-ms-win-crt-math-l1-1-0.dll
api-ms-win-crt-multibyte-l1-1-0.dll
api-ms-win-crt-private-l1-1-0.dll
api-ms-win-crt-process-l1-1-0.dll
api-ms-win-crt-runtime-l1-1-0.dll
api-ms-win-crt-stdio-l1-1-0.dll
api-ms-win-crt-string-l1-1-0.dll
api-ms-win-crt-time-l1-1-0.dll
api-ms-win-crt-utility-l1-1-0.dll
clrcompression.dll
clretwrc.dll
clrjit.dll
coreclr.dll
createdump
createdump.exe
dbgshim.dll
hostfxr.dll
hostpolicy.dll
libclrjit.dylib
libclrjit.so
libcoreclr.dylib
libcoreclr.so
libcoreclrtraceptprovider.so
libdbgshim.dylib
libdbgshim.so
libhostfxr.dylib
libhostfxr.so
libhostpolicy.dylib
libhostpolicy.so
libmscordaccore.dylib
libmscordaccore.so
libmscordbi.dylib
libmscordbi.so
Microsoft.CSharp.dll
Microsoft.DiaSymReader.Native.amd64.dll
Microsoft.VisualBasic.Core.dll
Microsoft.VisualBasic.dll
Microsoft.Win32.Primitives.dll
Microsoft.Win32.Registry.dll
mscordaccore.dll
mscordaccore_amd64_amd64_6.0.522.21309.dll
mscordbi.dll
mscorlib.dll
mscorrc.debug.dll
mscorrc.dll
msquic.dll
netstandard.dll
SOS_README.md
System.AppContext.dll
System.Buffers.dll
System.Collections.Concurrent.dll
System.Collections.dll
System.Collections.Immutable.dll
System.Collections.NonGeneric.dll
System.Collections.Specialized.dll
System.ComponentModel.Annotations.dll
System.ComponentModel.DataAnnotations.dll
System.ComponentModel.dll
System.ComponentModel.EventBasedAsync.dll
System.ComponentModel.Primitives.dll
System.ComponentModel.TypeConverter.dll
System.Configuration.dll
System.Console.dll
System.Core.dll
System.Data.Common.dll
System.Data.DataSetExtensions.dll
System.Data.dll
System.Diagnostics.Contracts.dll
System.Diagnostics.Debug.dll
System.Diagnostics.DiagnosticSource.dll
System.Diagnostics.FileVersionInfo.dll
System.Diagnostics.Process.dll
System.Diagnostics.StackTrace.dll
System.Diagnostics.TextWriterTraceListener.dll
System.Diagnostics.Tools.dll
System.Diagnostics.TraceSource.dll
System.Diagnostics.Tracing.dll
System.dll
System.Drawing.dll
System.Drawing.Primitives.dll
System.Dynamic.Runtime.dll
System.Formats.Asn1.dll
System.Globalization.Calendars.dll
System.Globalization.dll
System.Globalization.Extensions.dll
System.Globalization.Native.dylib
System.Globalization.Native.so
System.IO.Compression.Brotli.dll
System.IO.Compression.dll
System.IO.Compression.FileSystem.dll
System.IO.Compression.Native.a
System.IO.Compression.Native.dll
System.IO.Compression.Native.dylib
System.IO.Compression.Native.so
System.IO.Compression.ZipFile.dll
System.IO.dll
System.IO.FileSystem.AccessControl.dll
System.IO.FileSystem.dll
System.IO.FileSystem.DriveInfo.dll
System.IO.FileSystem.Primitives.dll
System.IO.FileSystem.Watcher.dll
System.IO.IsolatedStorage.dll
System.IO.MemoryMappedFiles.dll
System.IO.Pipes.AccessControl.dll
System.IO.Pipes.dll
System.IO.UnmanagedMemoryStream.dll
System.Linq.dll
System.Linq.Expressions.dll
System.Linq.Parallel.dll
System.Linq.Queryable.dll
System.Memory.dll
System.Native.a
System.Native.dylib
System.Native.so
System.Net.dll
System.Net.Http.dll
System.Net.Http.Json.dll
System.Net.Http.Native.a
System.Net.Http.Native.dylib
System.Net.Http.Native.so
System.Net.HttpListener.dll
System.Net.Mail.dll
System.Net.NameResolution.dll
System.Net.NetworkInformation.dll
System.Net.Ping.dll
System.Net.Primitives.dll
System.Net.Quic.dll
System.Net.Requests.dll
System.Net.Security.dll
System.Net.Security.Native.a
System.Net.Security.Native.dylib
System.Net.Security.Native.so
System.Net.ServicePoint.dll
System.Net.Sockets.dll
System.Net.WebClient.dll
System.Net.WebHeaderCollection.dll
System.Net.WebProxy.dll
System.Net.WebSockets.Client.dll
System.Net.WebSockets.dll
System.Numerics.dll
System.Numerics.Vectors.dll
System.ObjectModel.dll
System.Private.CoreLib.dll
System.Private.DataContractSerialization.dll
System.Private.Uri.dll
System.Private.Xml.dll
System.Private.Xml.Linq.dll
System.Reflection.DispatchProxy.dll
System.Reflection.dll
System.Reflection.Emit.dll
System.Reflection.Emit.ILGeneration.dll
System.Reflection.Emit.Lightweight.dll
System.Reflection.Extensions.dll
System.Reflection.Metadata.dll
System.Reflection.Primitives.dll
System.Reflection.TypeExtensions.dll
System.Resources.Reader.dll
System.Resources.ResourceManager.dll
System.Resources.Writer.dll
System.Runtime.CompilerServices.Unsafe.dll
System.Runtime.CompilerServices.VisualC.dll
System.Runtime.dll
System.Runtime.Extensions.dll
System.Runtime.Handles.dll
System.Runtime.InteropServices.dll
System.Runtime.InteropServices.RuntimeInformation.dll
System.Runtime.InteropServices.WindowsRuntime.dll
System.Runtime.Intrinsics.dll
System.Runtime.Loader.dll
System.Runtime.Numerics.dll
System.Runtime.Serialization.dll
System.Runtime.Serialization.Formatters.dll
System.Runtime.Serialization.Json.dll
System.Runtime.Serialization.Primitives.dll
System.Runtime.Serialization.Xml.dll
System.Runtime.WindowsRuntime.dll
System.Runtime.WindowsRuntime.UI.Xaml.dll
System.Security.AccessControl.dll
System.Security.Claims.dll
System.Security.Cryptography.Algorithms.dll
System.Security.Cryptography.Cng.dll
System.Security.Cryptography.Csp.dll
System.Security.Cryptography.Encoding.dll
System.Security.Cryptography.Native.Apple.a
System.Security.Cryptography.Native.Apple.dylib
System.Security.Cryptography.Native.OpenSsl.a
System.Security.Cryptography.Native.OpenSsl.dylib
System.Security.Cryptography.Native.OpenSsl.so
System.Security.Cryptography.OpenSsl.dll
System.Security.Cryptography.Primitives.dll
System.Security.Cryptography.X509Certificates.dll
System.Security.Cryptography.XCertificates.dll
System.Security.dll
System.Security.Principal.dll
System.Security.Principal.Windows.dll
System.Security.SecureString.dll
System.ServiceModel.Web.dll
System.ServiceProcess.dll
System.Text.Encoding.CodePages.dll
System.Text.Encoding.dll
System.Text.Encoding.Extensions.dll
System.Text.Encodings.Web.dll
System.Text.Json.dll
System.Text.RegularExpressions.dll
System.Threading.Channels.dll
System.Threading.dll
System.Threading.Overlapped.dll
System.Threading.Tasks.Dataflow.dll
System.Threading.Tasks.dll
System.Threading.Tasks.Extensions.dll
System.Threading.Tasks.Parallel.dll
System.Threading.Thread.dll
System.Threading.ThreadPool.dll
System.Threading.Timer.dll
System.Transactions.dll
System.Transactions.Local.dll
System.ValueTuple.dll
System.Web.dll
System.Web.HttpUtility.dll
System.Windows.dll
System.Xml.dll
System.Xml.Linq.dll
System.Xml.ReaderWriter.dll
System.Xml.Serialization.dll
System.Xml.XDocument.dll
System.Xml.XmlDocument.dll
System.Xml.XmlSerializer.dll
System.Xml.XPath.dll
System.Xml.XPath.XDocument.dll
ucrtbase.dll
WindowsBase.dll


@@ -0,0 +1,24 @@
[
{
"HashValue": "<NO_RUNTIME_EXTERNALS_HASH>",
"DownloadUrl": "https://github.com/actions/runner/releases/download/v<RUNNER_VERSION>/actions-runner-<RUNNER_PLATFORM>-<RUNNER_VERSION>-noruntime-noexternals.tar.gz",
"TrimmedContents": {
"dotnetRuntime": "<RUNTIME_HASH>",
"externals": "<EXTERNALS_HASH>"
}
},
{
"HashValue": "<NO_RUNTIME_HASH>",
"DownloadUrl": "https://github.com/actions/runner/releases/download/v<RUNNER_VERSION>/actions-runner-<RUNNER_PLATFORM>-<RUNNER_VERSION>-noruntime.tar.gz",
"TrimmedContents": {
"dotnetRuntime": "<RUNTIME_HASH>"
}
},
{
"HashValue": "<NO_EXTERNALS_HASH>",
"DownloadUrl": "https://github.com/actions/runner/releases/download/v<RUNNER_VERSION>/actions-runner-<RUNNER_PLATFORM>-<RUNNER_VERSION>-noexternals.tar.gz",
"TrimmedContents": {
"externals": "<EXTERNALS_HASH>"
}
}
]


@@ -0,0 +1,24 @@
[
{
"HashValue": "<NO_RUNTIME_EXTERNALS_HASH>",
"DownloadUrl": "https://github.com/actions/runner/releases/download/v<RUNNER_VERSION>/actions-runner-<RUNNER_PLATFORM>-<RUNNER_VERSION>-noruntime-noexternals.zip",
"TrimmedContents": {
"dotnetRuntime": "<RUNTIME_HASH>",
"externals": "<EXTERNALS_HASH>"
}
},
{
"HashValue": "<NO_RUNTIME_HASH>",
"DownloadUrl": "https://github.com/actions/runner/releases/download/v<RUNNER_VERSION>/actions-runner-<RUNNER_PLATFORM>-<RUNNER_VERSION>-noruntime.zip",
"TrimmedContents": {
"dotnetRuntime": "<RUNTIME_HASH>"
}
},
{
"HashValue": "<NO_EXTERNALS_HASH>",
"DownloadUrl": "https://github.com/actions/runner/releases/download/v<RUNNER_VERSION>/actions-runner-<RUNNER_PLATFORM>-<RUNNER_VERSION>-noexternals.zip",
"TrimmedContents": {
"externals": "<EXTERNALS_HASH>"
}
}
]
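
Judging from the field names, the two trimmedpackages JSON templates above let the self-update pick a smaller archive when the local .NET runtime and/or externals already match the published hashes; the placeholders are filled in at release time. The real selection happens in the runner's C# update code, which is not shown here; the helper below is only a hedged sketch of the idea:

// Pick the first package whose trimmed contents are all already present
// locally with matching hashes; otherwise fall back to the full package URL.
function pickPackage(trimmedPackages, localHashes, fullPackageUrl) {
  for (const pkg of trimmedPackages) {
    const parts = Object.entries(pkg.TrimmedContents)
    if (parts.every(([name, hash]) => localHashes[name] === hash)) {
      return pkg.DownloadUrl
    }
  }
  return fullPackageUrl
}
// pickPackage(packages, { dotnetRuntime: '...', externals: '...' }, fullUrl)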


@@ -33,6 +33,12 @@ namespace GitHub.Runner.Common
[DataMember(EmitDefaultValue = false)]
public string PoolName { get; set; }
[DataMember(EmitDefaultValue = false)]
public bool DisableUpdate { get; set; }
[DataMember(EmitDefaultValue = false)]
public bool Ephemeral { get; set; }
[DataMember(EmitDefaultValue = false)]
public string ServerUrl { get; set; }
@@ -108,9 +114,9 @@ namespace GitHub.Runner.Common
CredentialData GetMigratedCredentials();
RunnerSettings GetSettings();
void SaveCredential(CredentialData credential);
void SaveMigratedCredential(CredentialData credential);
void SaveSettings(RunnerSettings settings);
void DeleteCredential();
void DeleteMigratedCredential();
void DeleteSettings();
}
@@ -232,21 +238,6 @@ namespace GitHub.Runner.Common
File.SetAttributes(_credFilePath, File.GetAttributes(_credFilePath) | FileAttributes.Hidden);
}
public void SaveMigratedCredential(CredentialData credential)
{
Trace.Info("Saving {0} migrated credential @ {1}", credential.Scheme, _migratedCredFilePath);
if (File.Exists(_migratedCredFilePath))
{
// Delete existing credential file first, since the file is hidden and not able to overwrite.
Trace.Info("Delete exist runner migrated credential file.");
IOUtil.DeleteFile(_migratedCredFilePath);
}
IOUtil.SaveObject(credential, _migratedCredFilePath);
Trace.Info("Migrated Credentials Saved.");
File.SetAttributes(_migratedCredFilePath, File.GetAttributes(_migratedCredFilePath) | FileAttributes.Hidden);
}
public void SaveSettings(RunnerSettings settings)
{
Trace.Info("Saving runner settings.");
@@ -268,6 +259,11 @@ namespace GitHub.Runner.Common
IOUtil.Delete(_migratedCredFilePath, default(CancellationToken));
}
public void DeleteMigratedCredential()
{
IOUtil.Delete(_migratedCredFilePath, default(CancellationToken));
}
public void DeleteSettings()
{
IOUtil.Delete(_configFilePath, default(CancellationToken));


@@ -26,6 +26,7 @@ namespace GitHub.Runner.Common
Certificates,
Options,
SetupInfo,
Telemetry
}
public static class Constants
@@ -41,6 +42,8 @@ namespace GitHub.Runner.Common
public static string PluginTracePrefix = "##[plugin.trace]";
public static readonly int RunnerDownloadRetryMaxAttempts = 3;
public static readonly int CompositeActionsMaxDepth = 9;
// This enum is embedded within the Constants class to make it easier to reference and avoid
// ambiguous type reference with System.Runtime.InteropServices.OSPlatform and System.Runtime.InteropServices.Architecture
public enum OSPlatform
@@ -83,14 +86,15 @@ namespace GitHub.Runner.Common
public static class CommandLine
{
//if you are adding a new arg, please make sure you update the
//validArgs array as well present in the CommandSettings.cs
//validOptions dictionary as well present in the CommandSettings.cs
public static class Args
{
public static readonly string Auth = "auth";
public static readonly string JitConfig = "jitconfig";
public static readonly string Labels = "labels";
public static readonly string MonitorSocketAddress = "monitorsocketaddress";
public static readonly string Name = "name";
public static readonly string Pool = "pool";
public static readonly string RunnerGroup = "runnergroup";
public static readonly string StartupType = "startuptype";
public static readonly string Url = "url";
public static readonly string UserName = "username";
@@ -99,9 +103,11 @@ namespace GitHub.Runner.Common
// Secret args. Must be added to the "Secrets" getter as well.
public static readonly string Token = "token";
public static readonly string PAT = "pat";
public static readonly string WindowsLogonPassword = "windowslogonpassword";
public static string[] Secrets => new[]
{
PAT,
Token,
WindowsLogonPassword,
};
@@ -116,13 +122,16 @@ namespace GitHub.Runner.Common
}
//if you are adding a new flag, please make sure you update the
//validFlags array as well present in the CommandSettings.cs
//validOptions dictionary as well present in the CommandSettings.cs
public static class Flags
{
public static readonly string Check = "check";
public static readonly string Commit = "commit";
public static readonly string Ephemeral = "ephemeral";
public static readonly string Help = "help";
public static readonly string Replace = "replace";
public static readonly string Once = "once";
public static readonly string DisableUpdate = "disableupdate";
public static readonly string Once = "once"; // Keep this around since customers still relies on it
public static readonly string RunAsService = "runasservice";
public static readonly string Unattended = "unattended";
public static readonly string Version = "version";
@@ -137,6 +146,23 @@ namespace GitHub.Runner.Common
public const int RunnerUpdating = 3;
public const int RunOnceRunnerUpdating = 4;
}
public static class Features
{
public static readonly string DiskSpaceWarning = "runner.diskspace.warning";
public static readonly string Node12Warning = "DistributedTask.AddWarningToNode12Action";
public static readonly string UseContainerPathForTemplate = "DistributedTask.UseContainerPathForTemplate";
public static readonly string AllowRunnerContainerHooks = "DistributedTask.AllowRunnerContainerHooks";
}
public static readonly string InternalTelemetryIssueDataKey = "_internal_telemetry";
public static readonly string WorkerCrash = "WORKER_CRASH";
public static readonly string LowDiskSpace = "LOW_DISK_SPACE";
public static readonly string UnsupportedCommand = "UNSUPPORTED_COMMAND";
public static readonly string UnsupportedCommandMessageDisabled = "The `{0}` command is disabled. Please upgrade to using Environment Files or opt into unsecure command execution by setting the `ACTIONS_ALLOW_UNSECURE_COMMANDS` environment variable to `true`. For more information see: https://github.blog/changelog/2020-10-01-github-actions-deprecating-set-env-and-add-path-commands/";
public static readonly string UnsupportedStopCommandTokenDisabled = "You cannot use a endToken that is an empty string, the string 'pause-logging', or another workflow command. For more information see: https://docs.github.com/actions/learn-github-actions/workflow-commands-for-github-actions#example-stopping-and-starting-workflow-commands or opt into insecure command execution by setting the `ACTIONS_ALLOW_UNSECURE_STOPCOMMAND_TOKENS` environment variable to `true`.";
public static readonly string UnsupportedSummarySize = "$GITHUB_STEP_SUMMARY upload aborted, supports content up to a size of {0}k, got {1}k. For more information see: https://docs.github.com/actions/using-workflows/workflow-commands-for-github-actions#adding-a-markdown-summary";
public static readonly string Node12DetectedAfterEndOfLife = "Node.js 12 actions are deprecated. Please update the following actions to use Node.js 16: {0}";
}
public static class RunnerEvent
@@ -168,6 +194,13 @@ namespace GitHub.Runner.Common
public static readonly string Success = "success";
}
public static class Hooks
{
public static readonly string JobStartedStepName = "Set up runner";
public static readonly string JobCompletedStepName = "Complete runner";
public static readonly string ContainerHooksPath = "ACTIONS_RUNNER_CONTAINER_HOOKS";
}
public static class Path
{
public static readonly string ActionsDirectory = "_actions";
@@ -195,13 +228,21 @@ namespace GitHub.Runner.Common
//
// Keep alphabetical
//
public static readonly string AllowUnsupportedCommands = "ACTIONS_ALLOW_UNSECURE_COMMANDS";
public static readonly string AllowUnsupportedStopCommandTokens = "ACTIONS_ALLOW_UNSECURE_STOPCOMMAND_TOKENS";
public static readonly string RequireJobContainer = "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER";
public static readonly string RunnerDebug = "ACTIONS_RUNNER_DEBUG";
public static readonly string StepDebug = "ACTIONS_STEP_DEBUG";
public static readonly string AllowActionsUseUnsecureNodeVersion = "ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION";
}
public static class Agent
{
public static readonly string ToolsDirectory = "agent.ToolsDirectory";
// Set this env var to "node12" to downgrade the node version for internal functions (e.g hashfiles). This does NOT affect the version of node actions.
public static readonly string ForcedInternalNodeVersion = "ACTIONS_RUNNER_FORCED_INTERNAL_NODE_VERSION";
public static readonly string ForcedActionsNodeVersion = "ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION";
}
public static class System


@@ -51,11 +51,23 @@ namespace GitHub.Runner.Common
Add<T>(extensions, "GitHub.Runner.Worker.RemoveMatcherCommandExtension, Runner.Worker");
Add<T>(extensions, "GitHub.Runner.Worker.WarningCommandExtension, Runner.Worker");
Add<T>(extensions, "GitHub.Runner.Worker.ErrorCommandExtension, Runner.Worker");
Add<T>(extensions, "GitHub.Runner.Worker.NoticeCommandExtension, Runner.Worker");
Add<T>(extensions, "GitHub.Runner.Worker.DebugCommandExtension, Runner.Worker");
Add<T>(extensions, "GitHub.Runner.Worker.GroupCommandExtension, Runner.Worker");
Add<T>(extensions, "GitHub.Runner.Worker.EndGroupCommandExtension, Runner.Worker");
Add<T>(extensions, "GitHub.Runner.Worker.EchoCommandExtension, Runner.Worker");
break;
case "GitHub.Runner.Worker.IFileCommandExtension":
Add<T>(extensions, "GitHub.Runner.Worker.AddPathFileCommand, Runner.Worker");
Add<T>(extensions, "GitHub.Runner.Worker.SetEnvFileCommand, Runner.Worker");
Add<T>(extensions, "GitHub.Runner.Worker.CreateStepSummaryCommand, Runner.Worker");
break;
case "GitHub.Runner.Listener.Check.ICheckExtension":
Add<T>(extensions, "GitHub.Runner.Listener.Check.InternetCheck, Runner.Listener");
Add<T>(extensions, "GitHub.Runner.Listener.Check.ActionsCheck, Runner.Listener");
Add<T>(extensions, "GitHub.Runner.Listener.Check.GitCheck, Runner.Listener");
Add<T>(extensions, "GitHub.Runner.Listener.Check.NodeJsCheck, Runner.Listener");
break;
default:
// This should never happen.
throw new NotSupportedException($"Unexpected extension type: '{typeof(T).FullName}'");


@@ -1,19 +1,19 @@
using GitHub.Runner.Common.Util;
using System;
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.Tracing;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Reflection;
using System.Runtime.Loader;
using System.Threading;
using System.Threading.Tasks;
using System.Diagnostics;
using System.Net.Http;
using System.Diagnostics.Tracing;
using GitHub.DistributedTask.Logging;
using System.Net.Http.Headers;
using GitHub.Runner.Common.Util;
using GitHub.Runner.Sdk;
namespace GitHub.Runner.Common
@@ -85,24 +85,28 @@ namespace GitHub.Runner.Common
this.SecretMasker.AddValueEncoder(ValueEncoders.Base64StringEscape);
this.SecretMasker.AddValueEncoder(ValueEncoders.Base64StringEscapeShift1);
this.SecretMasker.AddValueEncoder(ValueEncoders.Base64StringEscapeShift2);
this.SecretMasker.AddValueEncoder(ValueEncoders.CommandLineArgumentEscape);
this.SecretMasker.AddValueEncoder(ValueEncoders.ExpressionStringEscape);
this.SecretMasker.AddValueEncoder(ValueEncoders.JsonStringEscape);
this.SecretMasker.AddValueEncoder(ValueEncoders.UriDataEscape);
this.SecretMasker.AddValueEncoder(ValueEncoders.XmlDataEscape);
this.SecretMasker.AddValueEncoder(ValueEncoders.TrimDoubleQuotes);
this.SecretMasker.AddValueEncoder(ValueEncoders.PowerShellPreAmpersandEscape);
this.SecretMasker.AddValueEncoder(ValueEncoders.PowerShellPostAmpersandEscape);
// Create the trace manager.
if (string.IsNullOrEmpty(logFile))
{
int logPageSize;
string logSizeEnv = Environment.GetEnvironmentVariable($"{hostType.ToUpperInvariant()}_LOGSIZE");
if (!string.IsNullOrEmpty(logSizeEnv) || !int.TryParse(logSizeEnv, out logPageSize))
if (string.IsNullOrEmpty(logSizeEnv) || !int.TryParse(logSizeEnv, out logPageSize))
{
logPageSize = _defaultLogPageSize;
}
int logRetentionDays;
string logRetentionDaysEnv = Environment.GetEnvironmentVariable($"{hostType.ToUpperInvariant()}_LOGRETENTION");
if (!string.IsNullOrEmpty(logRetentionDaysEnv) || !int.TryParse(logRetentionDaysEnv, out logRetentionDays))
if (string.IsNullOrEmpty(logRetentionDaysEnv) || !int.TryParse(logRetentionDaysEnv, out logRetentionDays))
{
logRetentionDays = _defaultLogRetentionDays;
}
@@ -190,6 +194,11 @@ namespace GitHub.Runner.Common
_trace.Info($"No proxy settings were found based on environmental variables (http_proxy/https_proxy/HTTP_PROXY/HTTPS_PROXY)");
}
if (StringUtil.ConvertToBoolean(Environment.GetEnvironmentVariable("GITHUB_ACTIONS_RUNNER_TLS_NO_VERIFY")))
{
_trace.Warning($"Runner is running under insecure mode: HTTPS server certifcate validation has been turned off by GITHUB_ACTIONS_RUNNER_TLS_NO_VERIFY environment variable.");
}
var credFile = GetConfigFile(WellKnownConfigFile.Credentials);
if (File.Exists(credFile))
{
@@ -197,9 +206,19 @@ namespace GitHub.Runner.Common
if (credData != null &&
credData.Data.TryGetValue("clientId", out var clientId))
{
_userAgents.Add(new ProductInfoHeaderValue($"RunnerId", clientId));
_userAgents.Add(new ProductInfoHeaderValue("ClientId", clientId));
}
}
var runnerFile = GetConfigFile(WellKnownConfigFile.Runner);
if (File.Exists(runnerFile))
{
var runnerSettings = IOUtil.LoadObject<RunnerSettings>(runnerFile);
_userAgents.Add(new ProductInfoHeaderValue("RunnerId", runnerSettings.AgentId.ToString(CultureInfo.InvariantCulture)));
_userAgents.Add(new ProductInfoHeaderValue("GroupId", runnerSettings.PoolId.ToString(CultureInfo.InvariantCulture)));
}
_userAgents.Add(new ProductInfoHeaderValue("CommitSHA", BuildConstants.Source.CommitHash));
}
public string GetDirectory(WellKnownDirectory directory)
@@ -340,6 +359,12 @@ namespace GitHub.Runner.Common
".setup_info");
break;
case WellKnownConfigFile.Telemetry:
path = Path.Combine(
GetDirectory(WellKnownDirectory.Diag),
".telemetry");
break;
default:
throw new NotSupportedException($"Unexpected well known config file: '{configFile}'");
}
@@ -614,9 +639,33 @@ namespace GitHub.Runner.Common
{
public static HttpClientHandler CreateHttpClientHandler(this IHostContext context)
{
HttpClientHandler clientHandler = new HttpClientHandler();
clientHandler.Proxy = context.WebProxy;
return clientHandler;
var handlerFactory = context.GetService<IHttpClientHandlerFactory>();
return handlerFactory.CreateClientHandler(context.WebProxy);
}
public static string GetDefaultShellForScript(this IHostContext hostContext, string path, string prependPath)
{
var trace = hostContext.GetTrace(nameof(GetDefaultShellForScript));
switch (Path.GetExtension(path))
{
case ".sh":
// use 'sh' args but prefer bash
if (WhichUtil.Which("bash", false, trace, prependPath) != null)
{
return "bash";
}
return "sh";
case ".ps1":
if (WhichUtil.Which("pwsh", false, trace, prependPath) != null)
{
return "pwsh";
}
return "powershell";
case ".js":
return Path.Combine(hostContext.GetDirectory(WellKnownDirectory.Externals), NodeUtil.GetInternalNodeVersion(), "bin", $"node{IOUtil.ExeExtension}") + " {0}";
default:
throw new ArgumentException($"{path} is not a valid path to a script. Make sure it ends in '.sh', '.ps1' or '.js'.");
}
}
}


@@ -0,0 +1,27 @@
using System;
using System.Net.Http;
using GitHub.Runner.Sdk;
namespace GitHub.Runner.Common
{
[ServiceLocator(Default = typeof(HttpClientHandlerFactory))]
public interface IHttpClientHandlerFactory : IRunnerService
{
HttpClientHandler CreateClientHandler(RunnerWebProxy webProxy);
}
public class HttpClientHandlerFactory : RunnerService, IHttpClientHandlerFactory
{
public HttpClientHandler CreateClientHandler(RunnerWebProxy webProxy)
{
var client = new HttpClientHandler() { Proxy = webProxy };
if (StringUtil.ConvertToBoolean(Environment.GetEnvironmentVariable("GITHUB_ACTIONS_RUNNER_TLS_NO_VERIFY")))
{
client.ServerCertificateCustomValidationCallback = HttpClientHandler.DangerousAcceptAnyServerCertificateValidator;
}
return client;
}
}
}
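A hedged usage sketch of the handler shape above in isolation; the plain string comparison stands in for StringUtil.ConvertToBoolean, and the sample URL and User-Agent are only for demonstration.

using System;
using System.Net.Http;
using System.Threading.Tasks;

class TlsNoVerifySketch
{
    static HttpClientHandler CreateHandler()
    {
        var handler = new HttpClientHandler();
        if (string.Equals(Environment.GetEnvironmentVariable("GITHUB_ACTIONS_RUNNER_TLS_NO_VERIFY"), "true", StringComparison.OrdinalIgnoreCase))
        {
            // Insecure: accepts any server certificate, mirroring the warning traced at startup.
            handler.ServerCertificateCustomValidationCallback = HttpClientHandler.DangerousAcceptAnyServerCertificateValidator;
        }
        return handler;
    }

    static async Task Main()
    {
        using var client = new HttpClient(CreateHandler());
        client.DefaultRequestHeaders.UserAgent.ParseAdd("tls-sketch/1.0");
        var response = await client.GetAsync("https://api.github.com");
        Console.WriteLine(response.StatusCode);
    }
}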


@@ -1,27 +1,39 @@
using GitHub.DistributedTask.WebApi;
using System;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Net.WebSockets;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using GitHub.DistributedTask.WebApi;
using GitHub.Runner.Sdk;
using GitHub.Services.Common;
using GitHub.Services.WebApi;
using GitHub.Services.WebApi.Utilities.Internal;
using Newtonsoft.Json;
namespace GitHub.Runner.Common
{
[ServiceLocator(Default = typeof(JobServer))]
public interface IJobServer : IRunnerService
public interface IJobServer : IRunnerService, IAsyncDisposable
{
Task ConnectAsync(VssConnection jobConnection);
void InitializeWebsocketClient(ServiceEndpoint serviceEndpoint);
// logging and console
Task<TaskLog> AppendLogContentAsync(Guid scopeIdentifier, string hubName, Guid planId, int logId, Stream uploadStream, CancellationToken cancellationToken);
Task AppendTimelineRecordFeedAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, Guid timelineRecordId, Guid stepId, IList<string> lines, CancellationToken cancellationToken);
Task AppendTimelineRecordFeedAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, Guid timelineRecordId, Guid stepId, IList<string> lines, long? startLine, CancellationToken cancellationToken);
Task<TaskAttachment> CreateAttachmentAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, Guid timelineRecordId, String type, String name, Stream uploadStream, CancellationToken cancellationToken);
Task<TaskLog> CreateLogAsync(Guid scopeIdentifier, string hubName, Guid planId, TaskLog log, CancellationToken cancellationToken);
Task<Timeline> CreateTimelineAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, CancellationToken cancellationToken);
Task<List<TimelineRecord>> UpdateTimelineRecordsAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, IEnumerable<TimelineRecord> records, CancellationToken cancellationToken);
Task RaisePlanEventAsync<T>(Guid scopeIdentifier, string hubName, Guid planId, T eventData, CancellationToken cancellationToken) where T : JobEvent;
Task<Timeline> GetTimelineAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, CancellationToken cancellationToken);
Task<ActionDownloadInfoCollection> ResolveActionDownloadInfoAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid jobId, ActionReferenceList actions, CancellationToken cancellationToken);
}
public sealed class JobServer : RunnerService, IJobServer
@@ -29,11 +41,29 @@ namespace GitHub.Runner.Common
private bool _hasConnection;
private VssConnection _connection;
private TaskHttpClient _taskClient;
private ClientWebSocket _websocketClient;
private ServiceEndpoint _serviceEndpoint;
private int totalBatchedLinesAttemptedByWebsocket = 0;
private int failedAttemptsToPostBatchedLinesByWebsocket = 0;
private static readonly TimeSpan _minDelayForWebsocketReconnect = TimeSpan.FromMilliseconds(100);
private static readonly TimeSpan _maxDelayForWebsocketReconnect = TimeSpan.FromMilliseconds(500);
private static readonly int _minWebsocketFailurePercentageAllowed = 50;
private static readonly int _minWebsocketBatchedLinesCountToConsider = 5;
private Task _websocketConnectTask;
public async Task ConnectAsync(VssConnection jobConnection)
{
_connection = jobConnection;
int attemptCount = 5;
int totalAttempts = 5;
int attemptCount = totalAttempts;
var configurationStore = HostContext.GetService<IConfigurationStore>();
var runnerSettings = configurationStore.GetSettings();
while (!_connection.HasAuthenticated && attemptCount-- > 0)
{
try
@@ -43,17 +73,86 @@ namespace GitHub.Runner.Common
}
catch (Exception ex) when (attemptCount > 0)
{
Trace.Info($"Catch exception during connect. {attemptCount} attemp left.");
Trace.Info($"Catch exception during connect. {attemptCount} attempts left.");
Trace.Error(ex);
if (runnerSettings.IsHostedServer)
{
await CheckNetworkEndpointsAsync(attemptCount);
}
}
await Task.Delay(100);
int attempt = totalAttempts - attemptCount;
TimeSpan backoff = BackoffTimerHelper.GetExponentialBackoff(attempt, TimeSpan.FromMilliseconds(100), TimeSpan.FromSeconds(3.2), TimeSpan.FromMilliseconds(100));
await Task.Delay(backoff);
}
_taskClient = _connection.GetClient<TaskHttpClient>();
_hasConnection = true;
}
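To make the retry cadence concrete, a hedged sketch of capped exponential backoff across the five connect attempts; the exact jitter applied by BackoffTimerHelper.GetExponentialBackoff is not reproduced here.

using System;

class BackoffSketch
{
    // Capped exponential growth from the minimum toward the maximum; the real helper also adds jitter.
    static TimeSpan ExponentialBackoff(int attempt, TimeSpan min, TimeSpan max) =>
        TimeSpan.FromMilliseconds(Math.Min(max.TotalMilliseconds, min.TotalMilliseconds * Math.Pow(2, attempt - 1)));

    static void Main()
    {
        for (var attempt = 1; attempt <= 5; attempt++)
        {
            Console.WriteLine($"attempt {attempt}: wait {ExponentialBackoff(attempt, TimeSpan.FromMilliseconds(100), TimeSpan.FromSeconds(3.2))}");
        }
    }
}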
private async Task CheckNetworkEndpointsAsync(int attemptsLeft)
{
try
{
Trace.Info("Requesting Actions Service health endpoint status");
using (var httpClientHandler = HostContext.CreateHttpClientHandler())
using (var actionsClient = new HttpClient(httpClientHandler))
{
var baseUri = new Uri(_connection.Uri.GetLeftPart(UriPartial.Authority));
actionsClient.DefaultRequestHeaders.UserAgent.AddRange(HostContext.UserAgents);
// Call the _apis/health endpoint, and include how many attempts are left as a URL query for easy tracking
var response = await actionsClient.GetAsync(new Uri(baseUri, $"_apis/health?_internalRunnerAttemptsLeft={attemptsLeft}"));
Trace.Info($"Actions health status code: {response.StatusCode}");
}
}
catch (Exception ex)
{
// Log error, but continue as this call is best-effort
Trace.Info($"Actions Service health endpoint failed due to {ex.GetType().Name}");
Trace.Error(ex);
}
try
{
Trace.Info("Requesting Github API endpoint status");
// This is a dotcom public API... just call it directly
using (var httpClientHandler = HostContext.CreateHttpClientHandler())
using (var gitHubClient = new HttpClient(httpClientHandler))
{
gitHubClient.DefaultRequestHeaders.UserAgent.AddRange(HostContext.UserAgents);
// Call the api.github.com endpoint, and include how many attempts are left as a URL query for easy tracking
var response = await gitHubClient.GetAsync($"https://api.github.com?_internalRunnerAttemptsLeft={attemptsLeft}");
Trace.Info($"api.github.com status code: {response.StatusCode}");
}
}
catch (Exception ex)
{
// Log error, but continue as this call is best-effort
Trace.Info($"Github API endpoint failed due to {ex.GetType().Name}");
Trace.Error(ex);
}
}
public void InitializeWebsocketClient(ServiceEndpoint serviceEndpoint)
{
this._serviceEndpoint = serviceEndpoint;
InitializeWebsocketClient(TimeSpan.Zero);
}
public ValueTask DisposeAsync()
{
CloseWebSocket(WebSocketCloseStatus.NormalClosure, CancellationToken.None);
GC.SuppressFinalize(this);
return ValueTask.CompletedTask;
}
private void CheckConnection()
{
if (!_hasConnection)
@@ -62,6 +161,53 @@ namespace GitHub.Runner.Common
}
}
private void InitializeWebsocketClient(TimeSpan delay)
{
if (_serviceEndpoint.Authorization != null &&
_serviceEndpoint.Authorization.Parameters.TryGetValue(EndpointAuthorizationParameters.AccessToken, out var accessToken) &&
!string.IsNullOrEmpty(accessToken))
{
if (_serviceEndpoint.Data.TryGetValue("FeedStreamUrl", out var feedStreamUrl) && !string.IsNullOrEmpty(feedStreamUrl))
{
// let's ensure we use the right scheme
feedStreamUrl = feedStreamUrl.Replace("https://", "wss://").Replace("http://", "ws://");
Trace.Info($"Creating websocket client ..." + feedStreamUrl);
this._websocketClient = new ClientWebSocket();
this._websocketClient.Options.SetRequestHeader("Authorization", $"Bearer {accessToken}");
var userAgentValues = new List<ProductInfoHeaderValue>();
userAgentValues.AddRange(UserAgentUtility.GetDefaultRestUserAgent());
userAgentValues.AddRange(HostContext.UserAgents);
this._websocketClient.Options.SetRequestHeader("User-Agent", string.Join(" ", userAgentValues.Select(x => x.ToString())));
this._websocketConnectTask = ConnectWebSocketClient(feedStreamUrl, delay);
}
else
{
Trace.Info($"No FeedStreamUrl found, so we will use Rest API calls for sending feed data");
}
}
else
{
Trace.Info($"No access token from the service endpoint");
}
}
private async Task ConnectWebSocketClient(string feedStreamUrl, TimeSpan delay)
{
try
{
Trace.Info($"Attempting to start websocket client with delay {delay}.");
await Task.Delay(delay);
await this._websocketClient.ConnectAsync(new Uri(feedStreamUrl), default(CancellationToken));
Trace.Info($"Successfully started websocket client.");
}
catch (Exception ex)
{
Trace.Info("Exception caught during websocket client connect, fallback of HTTP would be used now instead of websocket.");
Trace.Error(ex);
}
}
//-----------------------------------------------------------------
// Feedback: WebConsole, TimelineRecords and Logs
//-----------------------------------------------------------------
@@ -72,10 +218,86 @@ namespace GitHub.Runner.Common
return _taskClient.AppendLogContentAsync(scopeIdentifier, hubName, planId, logId, uploadStream, cancellationToken: cancellationToken);
}
public Task AppendTimelineRecordFeedAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, Guid timelineRecordId, Guid stepId, IList<string> lines, CancellationToken cancellationToken)
public async Task AppendTimelineRecordFeedAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, Guid timelineRecordId, Guid stepId, IList<string> lines, long? startLine, CancellationToken cancellationToken)
{
CheckConnection();
return _taskClient.AppendTimelineRecordFeedAsync(scopeIdentifier, hubName, planId, timelineId, timelineRecordId, stepId, lines, cancellationToken: cancellationToken);
var pushedLinesViaWebsocket = false;
if (_websocketConnectTask != null)
{
await _websocketConnectTask;
}
// "_websocketClient != null" implies either: We have a successful connection OR we have to attempt sending again and then reconnect
// ...in other words, if websocket client is null, we will skip sending to websocket and just use rest api calls to send data
if (_websocketClient != null)
{
var linesWrapper = startLine.HasValue ? new TimelineRecordFeedLinesWrapper(stepId, lines, startLine.Value) : new TimelineRecordFeedLinesWrapper(stepId, lines);
var jsonData = StringUtil.ConvertToJson(linesWrapper);
try
{
totalBatchedLinesAttemptedByWebsocket++;
var jsonDataBytes = Encoding.UTF8.GetBytes(jsonData);
// break the message into chunks of 1024 bytes
for (var i = 0; i < jsonDataBytes.Length; i += 1 * 1024)
{
var lastChunk = i + (1 * 1024) >= jsonDataBytes.Length;
var chunk = new ArraySegment<byte>(jsonDataBytes, i, Math.Min(1 * 1024, jsonDataBytes.Length - i));
await _websocketClient.SendAsync(chunk, WebSocketMessageType.Text, endOfMessage: lastChunk, cancellationToken);
}
pushedLinesViaWebsocket = true;
}
catch (Exception ex)
{
failedAttemptsToPostBatchedLinesByWebsocket++;
Trace.Info($"Caught exception during append web console line to websocket, let's fallback to sending via non-websocket call (total calls: {totalBatchedLinesAttemptedByWebsocket}, failed calls: {failedAttemptsToPostBatchedLinesByWebsocket}, websocket state: {this._websocketClient?.State}).");
Trace.Error(ex);
if (totalBatchedLinesAttemptedByWebsocket > _minWebsocketBatchedLinesCountToConsider)
{
// let's consider failure percentage
if (failedAttemptsToPostBatchedLinesByWebsocket * 100 / totalBatchedLinesAttemptedByWebsocket > _minWebsocketFailurePercentageAllowed)
{
Trace.Info($"Exhausted websocket allowed retries, we will not attempt websocket connection for this job to post lines again.");
CloseWebSocket(WebSocketCloseStatus.InternalServerError, cancellationToken);
// By setting it to null, we will ensure that we never try websocket path again for this job
_websocketClient = null;
}
}
if (_websocketClient != null)
{
var delay = BackoffTimerHelper.GetRandomBackoff(_minDelayForWebsocketReconnect, _maxDelayForWebsocketReconnect);
Trace.Info($"Websocket is not open, let's attempt to connect back again with random backoff {delay} ms (total calls: {totalBatchedLinesAttemptedByWebsocket}, failed calls: {failedAttemptsToPostBatchedLinesByWebsocket}).");
InitializeWebsocketClient(delay);
}
}
}
if (!pushedLinesViaWebsocket && !cancellationToken.IsCancellationRequested)
{
if (startLine.HasValue)
{
await _taskClient.AppendTimelineRecordFeedAsync(scopeIdentifier, hubName, planId, timelineId, timelineRecordId, stepId, lines, startLine.Value, cancellationToken: cancellationToken);
}
else
{
await _taskClient.AppendTimelineRecordFeedAsync(scopeIdentifier, hubName, planId, timelineId, timelineRecordId, stepId, lines, cancellationToken: cancellationToken);
}
}
}
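A standalone sketch of the 1 KiB chunking rule used above: every frame except the last is sent with endOfMessage set to false, so the service reassembles one logical message.

using System;
using System.Collections.Generic;
using System.Text;

class ChunkSketch
{
    const int ChunkSize = 1024;

    static IEnumerable<(ArraySegment<byte> Chunk, bool EndOfMessage)> Chunks(string json)
    {
        var bytes = Encoding.UTF8.GetBytes(json);
        for (var i = 0; i < bytes.Length; i += ChunkSize)
        {
            var length = Math.Min(ChunkSize, bytes.Length - i);
            // Only the final chunk carries endOfMessage = true.
            yield return (new ArraySegment<byte>(bytes, i, length), i + ChunkSize >= bytes.Length);
        }
    }

    static void Main()
    {
        foreach (var (chunk, last) in Chunks(new string('x', 2500)))
        {
            Console.WriteLine($"{chunk.Count} bytes, endOfMessage={last}");
        }
    }
}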
private void CloseWebSocket(WebSocketCloseStatus closeStatus, CancellationToken cancellationToken)
{
try
{
_websocketClient?.CloseOutputAsync(closeStatus, "Closing websocket", cancellationToken);
}
catch (Exception websocketEx)
{
// In some cases this is okay since the websocket might not be open yet, so just close it and don't trace exceptions
Trace.Info($"Failed to close websocket gracefully {websocketEx.GetType().Name}");
}
}
public Task<TaskAttachment> CreateAttachmentAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, Guid timelineRecordId, string type, string name, Stream uploadStream, CancellationToken cancellationToken)
@@ -113,5 +335,14 @@ namespace GitHub.Runner.Common
CheckConnection();
return _taskClient.GetTimelineAsync(scopeIdentifier, hubName, planId, timelineId, includeRecords: true, cancellationToken: cancellationToken);
}
//-----------------------------------------------------------------
// Action download info
//-----------------------------------------------------------------
public Task<ActionDownloadInfoCollection> ResolveActionDownloadInfoAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid jobId, ActionReferenceList actions, CancellationToken cancellationToken)
{
CheckConnection();
return _taskClient.ResolveActionDownloadInfoAsync(scopeIdentifier, hubName, planId, jobId, actions, cancellationToken: cancellationToken);
}
}
}


@@ -1,24 +1,24 @@
using GitHub.DistributedTask.WebApi;
using GitHub.Runner.Common.Util;
using System;
using System.Collections.Generic;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Pipelines = GitHub.DistributedTask.Pipelines;
using GitHub.DistributedTask.WebApi;
using GitHub.Runner.Sdk;
using Pipelines = GitHub.DistributedTask.Pipelines;
namespace GitHub.Runner.Common
{
[ServiceLocator(Default = typeof(JobServerQueue))]
public interface IJobServerQueue : IRunnerService, IThrottlingReporter
{
TaskCompletionSource<int> JobRecordUpdated { get; }
event EventHandler<ThrottlingEventArgs> JobServerQueueThrottling;
Task ShutdownAsync();
void Start(Pipelines.AgentJobRequestMessage jobRequest);
void QueueWebConsoleLine(Guid stepRecordId, string line);
void QueueWebConsoleLine(Guid stepRecordId, string line, long? lineNumber = null);
void QueueFileUpload(Guid timelineId, Guid timelineRecordId, string type, string name, string path, bool deleteSource);
void QueueTimelineRecordUpdate(Guid timelineId, TimelineRecord timelineRecord);
}
@@ -62,8 +62,11 @@ namespace GitHub.Runner.Common
private IJobServer _jobServer;
private Task[] _allDequeueTasks;
private readonly TaskCompletionSource<int> _jobCompletionSource = new TaskCompletionSource<int>();
private readonly TaskCompletionSource<int> _jobRecordUpdated = new TaskCompletionSource<int>();
private bool _queueInProcess = false;
public TaskCompletionSource<int> JobRecordUpdated => _jobRecordUpdated;
public event EventHandler<ThrottlingEventArgs> JobServerQueueThrottling;
// Web console dequeue will start by processing the queue every 250 ms for the first 60*4 iterations (~60 seconds).
@@ -72,6 +75,7 @@ namespace GitHub.Runner.Common
// at the same time we can cut the load on the server after the build has run for more than 60s
private int _webConsoleLineAggressiveDequeueCount = 0;
private const int _webConsoleLineAggressiveDequeueLimit = 4 * 60;
private const int _webConsoleLineQueueSizeLimit = 1024;
private bool _webConsoleLineAggressiveDequeue = true;
private bool _firstConsoleOutputs = true;
@@ -85,6 +89,10 @@ namespace GitHub.Runner.Common
{
Trace.Entering();
var serviceEndPoint = jobRequest.Resources.Endpoints.Single(x => string.Equals(x.Name, WellKnownServiceEndpointNames.SystemVssConnection, StringComparison.OrdinalIgnoreCase));
_jobServer.InitializeWebsocketClient(serviceEndPoint);
if (_queueInProcess)
{
Trace.Info("No-opt, all queue process tasks are running.");
@@ -152,13 +160,28 @@ namespace GitHub.Runner.Common
await ProcessTimelinesUpdateQueueAsync(runOnce: true);
Trace.Info("Timeline update queue drained.");
Trace.Info($"Disposing job server ...");
await _jobServer.DisposeAsync();
Trace.Info("All queue process tasks have been stopped, and all queues are drained.");
}
public void QueueWebConsoleLine(Guid stepRecordId, string line)
public void QueueWebConsoleLine(Guid stepRecordId, string line, long? lineNumber)
{
// We only process 500 lines of the queue each time.
// If the queue is backing up due to a slow HTTP request or a flood of output from a step,
// we will drop the output to avoid extra memory consumption in the runner, since the live console feed is best effort.
if (!string.IsNullOrEmpty(line) && _webConsoleLineQueue.Count < _webConsoleLineQueueSizeLimit)
{
Trace.Verbose("Enqueue web console line queue: {0}", line);
_webConsoleLineQueue.Enqueue(new ConsoleLineInfo(stepRecordId, line));
if (line.Length > 1024)
{
Trace.Verbose("Web console line is more than 1024 chars, truncate to first 1024 chars");
line = $"{line.Substring(0, 1024)}...";
}
_webConsoleLineQueue.Enqueue(new ConsoleLineInfo(stepRecordId, line, lineNumber));
}
}
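A minimal sketch of the same best-effort policy in isolation; the 1024 limits mirror the constants above, and ConcurrentQueue stands in for the runner's internal queue type.

using System;
using System.Collections.Concurrent;

class ConsoleLineQueueSketch
{
    const int QueueSizeLimit = 1024;
    const int MaxLineLength = 1024;
    static readonly ConcurrentQueue<string> Queue = new();

    static void Enqueue(string line)
    {
        // Drop lines once the queue backs up; the live console feed is best effort.
        if (string.IsNullOrEmpty(line) || Queue.Count >= QueueSizeLimit)
        {
            return;
        }
        // Truncate very long lines before they ever hit the queue.
        if (line.Length > MaxLineLength)
        {
            line = $"{line.Substring(0, MaxLineLength)}...";
        }
        Queue.Enqueue(line);
    }

    static void Main()
    {
        Enqueue(new string('x', 5000));
        Queue.TryDequeue(out var stored);
        Console.WriteLine(stored.Length); // 1027: 1024 chars plus "..."
    }
}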
public void QueueFileUpload(Guid timelineId, Guid timelineRecordId, string type, string name, string path, bool deleteSource)
@@ -214,7 +237,7 @@ namespace GitHub.Runner.Common
}
// Group console lines by the timeline record of each step
Dictionary<Guid, List<string>> stepsConsoleLines = new Dictionary<Guid, List<string>>();
Dictionary<Guid, List<TimelineRecordLogLine>> stepsConsoleLines = new Dictionary<Guid, List<TimelineRecordLogLine>>();
List<Guid> stepRecordIds = new List<Guid>(); // We need to keep lines in order
int linesCounter = 0;
ConsoleLineInfo lineInfo;
@@ -222,17 +245,11 @@ namespace GitHub.Runner.Common
{
if (!stepsConsoleLines.ContainsKey(lineInfo.StepRecordId))
{
stepsConsoleLines[lineInfo.StepRecordId] = new List<string>();
stepsConsoleLines[lineInfo.StepRecordId] = new List<TimelineRecordLogLine>();
stepRecordIds.Add(lineInfo.StepRecordId);
}
if (!string.IsNullOrEmpty(lineInfo.Line) && lineInfo.Line.Length > 1024)
{
Trace.Verbose("Web console line is more than 1024 chars, truncate to first 1024 chars");
lineInfo.Line = $"{lineInfo.Line.Substring(0, 1024)}...";
}
stepsConsoleLines[lineInfo.StepRecordId].Add(lineInfo.Line);
stepsConsoleLines[lineInfo.StepRecordId].Add(new TimelineRecordLogLine(lineInfo.Line, lineInfo.LineNumber));
linesCounter++;
// process at most about 500 web console lines during the regular timer dequeue task.
@@ -247,13 +264,13 @@ namespace GitHub.Runner.Common
{
// Split console lines into batches; each batch will contain at most 100 lines.
int batchCounter = 0;
List<List<string>> batchedLines = new List<List<string>>();
List<List<TimelineRecordLogLine>> batchedLines = new List<List<TimelineRecordLogLine>>();
foreach (var line in stepsConsoleLines[stepRecordId])
{
var currentBatch = batchedLines.ElementAtOrDefault(batchCounter);
if (currentBatch == null)
{
batchedLines.Add(new List<string>());
batchedLines.Add(new List<TimelineRecordLogLine>());
currentBatch = batchedLines.ElementAt(batchCounter);
}
@@ -275,7 +292,6 @@ namespace GitHub.Runner.Common
{
Trace.Info($"Skip {batchedLines.Count - 2} batches web console lines for last run");
batchedLines = batchedLines.TakeLast(2).ToList();
batchedLines[0].Insert(0, "...");
}
int errorCount = 0;
@@ -283,8 +299,12 @@ namespace GitHub.Runner.Common
{
try
{
// we will not requeue a failed batch, since the web console lines are time sensitive.
await _jobServer.AppendTimelineRecordFeedAsync(_scopeIdentifier, _hubName, _planId, _jobTimelineId, _jobTimelineRecordId, stepRecordId, batch, default(CancellationToken));
// Give at most 60s for each request.
using (var timeoutTokenSource = new CancellationTokenSource(TimeSpan.FromSeconds(60)))
{
await _jobServer.AppendTimelineRecordFeedAsync(_scopeIdentifier, _hubName, _planId, _jobTimelineId, _jobTimelineRecordId, stepRecordId, batch.Select(logLine => logLine.Line).ToList(), batch[0].LineNumber, timeoutTokenSource.Token);
}
if (_firstConsoleOutputs)
{
HostContext.WritePerfCounter($"WorkerJobServerQueueAppendFirstConsoleOutput_{_planId.ToString()}");
@@ -448,6 +468,14 @@ namespace GitHub.Runner.Common
{
Trace.Verbose("Cleanup buffered timeline record for timeline: {0}.", update.TimelineId);
}
if (!_jobRecordUpdated.Task.IsCompleted &&
update.PendingRecords.Any(x => x.Id == _jobTimelineRecordId && x.State != null))
{
// We have changed the state of the job
Trace.Info("Job timeline record has been updated for the first time.");
_jobRecordUpdated.TrySetResult(0);
}
}
catch (Exception ex)
{
@@ -537,6 +565,11 @@ namespace GitHub.Runner.Common
timelineRecord.WarningCount = rec.WarningCount;
}
if (rec.NoticeCount != null && rec.NoticeCount > 0)
{
timelineRecord.NoticeCount = rec.NoticeCount;
}
if (rec.Issues.Count > 0)
{
timelineRecord.Issues.Clear();
@@ -653,13 +686,15 @@ namespace GitHub.Runner.Common
internal class ConsoleLineInfo
{
public ConsoleLineInfo(Guid recordId, string line)
public ConsoleLineInfo(Guid recordId, string line, long? lineNumber)
{
this.StepRecordId = recordId;
this.Line = line;
this.LineNumber = lineNumber;
}
public Guid StepRecordId { get; set; }
public string Line { get; set; }
public long? LineNumber { get; set; }
}
}


@@ -101,7 +101,7 @@ namespace GitHub.Runner.Common
EndPage();
_byteCount = 0;
_dataFileName = Path.Combine(_pagesFolder, $"{_timelineId}_{_timelineRecordId}_{++_pageCount}.log");
_pageData = new FileStream(_dataFileName, FileMode.CreateNew);
_pageData = new FileStream(_dataFileName, FileMode.CreateNew, FileAccess.ReadWrite, FileShare.ReadWrite);
_pageWriter = new StreamWriter(_pageData, System.Text.Encoding.UTF8);
}
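A standalone sketch of what the switch to FileShare.ReadWrite enables: a second handle can read the in-progress page while the writer still holds it open (path and content here are illustrative).

using System;
using System.IO;
using System.Text;

class PagedLogShareSketch
{
    static void Main()
    {
        var path = Path.Combine(Path.GetTempPath(), Path.GetRandomFileName() + ".log");

        using var pageData = new FileStream(path, FileMode.CreateNew, FileAccess.ReadWrite, FileShare.ReadWrite);
        using var pageWriter = new StreamWriter(pageData, Encoding.UTF8);
        pageWriter.WriteLine("step output line");
        pageWriter.Flush();

        // Succeeds only because the writer's handle allows shared read/write access.
        using var reader = new StreamReader(new FileStream(path, FileMode.Open, FileAccess.Read, FileShare.ReadWrite));
        Console.WriteLine(reader.ReadToEnd());
    }
}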


@@ -3,6 +3,7 @@ using System.IO;
using System.IO.Pipes;
using System.Threading;
using System.Threading.Tasks;
using GitHub.Runner.Sdk;
namespace GitHub.Runner.Common
{
@@ -68,6 +69,7 @@ namespace GitHub.Runner.Common
public async Task SendAsync(MessageType messageType, string body, CancellationToken cancellationToken)
{
Trace.Info($"Sending message of length {body.Length}, with hash '{IOUtil.GetSha256Hash(body)}'");
await _writeStream.WriteInt32Async((int)messageType, cancellationToken);
await _writeStream.WriteStringAsync(body, cancellationToken);
}
@@ -77,6 +79,7 @@ namespace GitHub.Runner.Common
WorkerMessage result = new WorkerMessage(MessageType.NotInitialized, string.Empty);
result.MessageType = (MessageType)await _readStream.ReadInt32Async(cancellationToken);
result.Body = await _readStream.ReadStringAsync(cancellationToken);
Trace.Info($"Receiving message of length {result.Body.Length}, with hash '{IOUtil.GetSha256Hash(result.Body)}'");
return result;
}


@@ -0,0 +1,106 @@
using System;
using System.Collections.Generic;
using System.Runtime.Serialization;
using System.Threading;
using System.Threading.Tasks;
using GitHub.DistributedTask.Pipelines;
using GitHub.DistributedTask.WebApi;
using GitHub.Runner.Common.Util;
using GitHub.Runner.Sdk;
using GitHub.Services.Common;
using GitHub.Services.WebApi;
namespace GitHub.Runner.Common
{
[ServiceLocator(Default = typeof(RunServer))]
public interface IRunServer : IRunnerService
{
Task ConnectAsync(Uri serverUrl, VssCredentials credentials);
Task<AgentJobRequestMessage> GetJobMessageAsync(string id, CancellationToken token);
}
public sealed class RunServer : RunnerService, IRunServer
{
private bool _hasConnection;
private VssConnection _connection;
private TaskAgentHttpClient _taskAgentClient;
public async Task ConnectAsync(Uri serverUrl, VssCredentials credentials)
{
_connection = await EstablishVssConnection(serverUrl, credentials, TimeSpan.FromSeconds(100));
_taskAgentClient = _connection.GetClient<TaskAgentHttpClient>();
_hasConnection = true;
}
private async Task<VssConnection> EstablishVssConnection(Uri serverUrl, VssCredentials credentials, TimeSpan timeout)
{
Trace.Info($"EstablishVssConnection");
Trace.Info($"Establish connection with {timeout.TotalSeconds} seconds timeout.");
int attemptCount = 5;
while (attemptCount-- > 0)
{
var connection = VssUtil.CreateConnection(serverUrl, credentials, timeout: timeout);
try
{
await connection.ConnectAsync();
return connection;
}
catch (Exception ex) when (attemptCount > 0)
{
Trace.Info($"Catch exception during connect. {attemptCount} attempt left.");
Trace.Error(ex);
await HostContext.Delay(TimeSpan.FromMilliseconds(100), CancellationToken.None);
}
}
// should never reach here.
throw new InvalidOperationException(nameof(EstablishVssConnection));
}
private void CheckConnection()
{
if (!_hasConnection)
{
throw new InvalidOperationException($"SetConnection");
}
}
public Task<AgentJobRequestMessage> GetJobMessageAsync(string id, CancellationToken cancellationToken)
{
CheckConnection();
var jobMessage = RetryRequest<AgentJobRequestMessage>(async () =>
{
return await _taskAgentClient.GetJobMessageAsync(id, cancellationToken);
}, cancellationToken);
return jobMessage;
}
private async Task<T> RetryRequest<T>(Func<Task<T>> func,
CancellationToken cancellationToken,
int maxRetryAttemptsCount = 5
)
{
var retryCount = 0;
while (true)
{
retryCount++;
cancellationToken.ThrowIfCancellationRequested();
try
{
return await func();
}
// TODO: Add handling of non-retriable exceptions: https://github.com/github/actions-broker/issues/122
catch (Exception ex) when (retryCount < maxRetryAttemptsCount)
{
Trace.Error("Catch exception during get full job message");
Trace.Error(ex);
var backOff = BackoffTimerHelper.GetRandomBackoff(TimeSpan.FromSeconds(5), TimeSpan.FromSeconds(15));
Trace.Warning($"Back off {backOff.TotalSeconds} seconds before next retry. {maxRetryAttemptsCount - retryCount} attempt left.");
await Task.Delay(backOff, cancellationToken);
}
}
}
}
}
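A hedged, self-contained sketch of the same retry shape: a bounded number of attempts, a random 5-15 second backoff between them, and cancellation honored before each attempt; the names are illustrative, not the runner's.

using System;
using System.Threading;
using System.Threading.Tasks;

class RetrySketch
{
    static async Task<T> RetryAsync<T>(Func<Task<T>> action, CancellationToken token, int maxAttempts = 5)
    {
        for (var attempt = 1; ; attempt++)
        {
            token.ThrowIfCancellationRequested();
            try
            {
                return await action();
            }
            catch (Exception) when (attempt < maxAttempts)
            {
                // Random backoff between retries; the last attempt rethrows.
                var backoff = TimeSpan.FromSeconds(Random.Shared.Next(5, 16));
                await Task.Delay(backoff, token);
            }
        }
    }

    static async Task Main() =>
        Console.WriteLine(await RetryAsync(() => Task.FromResult(42), CancellationToken.None));
}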


@@ -1,14 +1,12 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>netcoreapp3.1</TargetFramework>
<TargetFramework>net6.0</TargetFramework>
<OutputType>Library</OutputType>
<RuntimeIdentifiers>win-x64;win-x86;linux-x64;linux-arm64;linux-arm;osx-x64</RuntimeIdentifiers>
<RuntimeIdentifiers>win-x64;win-x86;linux-x64;linux-arm64;linux-arm;osx-x64;osx-arm64</RuntimeIdentifiers>
<TargetLatestRuntimePatch>true</TargetLatestRuntimePatch>
<AssetTargetFallback>portable-net45+win8</AssetTargetFallback>
<NoWarn>NU1701;NU1603</NoWarn>
<Version>$(Version)</Version>
<TieredCompilationQuickJit>true</TieredCompilationQuickJit>
</PropertyGroup>
<ItemGroup>


@@ -29,8 +29,10 @@ namespace GitHub.Runner.Common
// Configuration
Task<TaskAgent> AddAgentAsync(Int32 agentPoolId, TaskAgent agent);
Task DeleteAgentAsync(int agentPoolId, int agentId);
Task DeleteAgentAsync(int agentId);
Task<List<TaskAgentPool>> GetAgentPoolsAsync(string agentPoolName = null, TaskAgentPoolType poolType = TaskAgentPoolType.Automation);
Task<List<TaskAgent>> GetAgentsAsync(int agentPoolId, string agentName = null);
Task<List<TaskAgent>> GetAgentsAsync(string agentName);
Task<TaskAgent> ReplaceAgentAsync(int agentPoolId, TaskAgent agent);
// messagequeue
@@ -45,15 +47,11 @@ namespace GitHub.Runner.Common
Task<TaskAgentJobRequest> FinishAgentRequestAsync(int poolId, long requestId, Guid lockToken, DateTime finishTime, TaskResult result, CancellationToken cancellationToken);
// agent package
Task<List<PackageMetadata>> GetPackagesAsync(string packageType, string platform, int top, CancellationToken cancellationToken);
Task<PackageMetadata> GetPackageAsync(string packageType, string platform, string version, CancellationToken cancellationToken);
Task<List<PackageMetadata>> GetPackagesAsync(string packageType, string platform, int top, bool includeToken, CancellationToken cancellationToken);
Task<PackageMetadata> GetPackageAsync(string packageType, string platform, string version, bool includeToken, CancellationToken cancellationToken);
// agent update
Task<TaskAgent> UpdateAgentUpdateStateAsync(int agentPoolId, int agentId, string currentState);
// runner authorization url
Task<string> GetRunnerAuthUrlAsync(int runnerPoolId, int runnerId);
Task ReportRunnerAuthUrlErrorAsync(int runnerPoolId, int runnerId, string error);
Task<TaskAgent> UpdateAgentUpdateStateAsync(int agentPoolId, int agentId, string currentState, string trace);
}
public sealed class RunnerServer : RunnerService, IRunnerServer
@@ -256,6 +254,11 @@ namespace GitHub.Runner.Common
return _genericTaskAgentClient.GetAgentsAsync(agentPoolId, agentName, false);
}
public Task<List<TaskAgent>> GetAgentsAsync(string agentName)
{
return GetAgentsAsync(0, agentName); // search in all agent pools
}
public Task<TaskAgent> ReplaceAgentAsync(int agentPoolId, TaskAgent agent)
{
CheckConnection(RunnerConnectionType.Generic);
@@ -268,6 +271,11 @@ namespace GitHub.Runner.Common
return _genericTaskAgentClient.DeleteAgentAsync(agentPoolId, agentId);
}
public Task DeleteAgentAsync(int agentId)
{
return DeleteAgentAsync(0, agentId); // agentPool is ignored server-side
}
//-----------------------------------------------------------------
// MessageQueue
//-----------------------------------------------------------------
@@ -321,37 +329,22 @@ namespace GitHub.Runner.Common
//-----------------------------------------------------------------
// Agent Package
//-----------------------------------------------------------------
public Task<List<PackageMetadata>> GetPackagesAsync(string packageType, string platform, int top, CancellationToken cancellationToken)
public Task<List<PackageMetadata>> GetPackagesAsync(string packageType, string platform, int top, bool includeToken, CancellationToken cancellationToken)
{
CheckConnection(RunnerConnectionType.Generic);
return _genericTaskAgentClient.GetPackagesAsync(packageType, platform, top, cancellationToken: cancellationToken);
return _genericTaskAgentClient.GetPackagesAsync(packageType, platform, top, includeToken, cancellationToken: cancellationToken);
}
public Task<PackageMetadata> GetPackageAsync(string packageType, string platform, string version, CancellationToken cancellationToken)
public Task<PackageMetadata> GetPackageAsync(string packageType, string platform, string version, bool includeToken, CancellationToken cancellationToken)
{
CheckConnection(RunnerConnectionType.Generic);
return _genericTaskAgentClient.GetPackageAsync(packageType, platform, version, cancellationToken: cancellationToken);
return _genericTaskAgentClient.GetPackageAsync(packageType, platform, version, includeToken, cancellationToken: cancellationToken);
}
public Task<TaskAgent> UpdateAgentUpdateStateAsync(int agentPoolId, int agentId, string currentState)
public Task<TaskAgent> UpdateAgentUpdateStateAsync(int agentPoolId, int agentId, string currentState, string trace)
{
CheckConnection(RunnerConnectionType.Generic);
return _genericTaskAgentClient.UpdateAgentUpdateStateAsync(agentPoolId, agentId, currentState);
}
//-----------------------------------------------------------------
// Runner Auth Url
//-----------------------------------------------------------------
public Task<string> GetRunnerAuthUrlAsync(int runnerPoolId, int runnerId)
{
CheckConnection(RunnerConnectionType.MessageQueue);
return _messageTaskAgentClient.GetAgentAuthUrlAsync(runnerPoolId, runnerId);
}
public Task ReportRunnerAuthUrlErrorAsync(int runnerPoolId, int runnerId, string error)
{
CheckConnection(RunnerConnectionType.MessageQueue);
return _messageTaskAgentClient.ReportAgentAuthUrlMigrationErrorAsync(runnerPoolId, runnerId, error);
return _genericTaskAgentClient.UpdateAgentUpdateStateAsync(agentPoolId, agentId, currentState, trace);
}
}
}


@@ -96,13 +96,14 @@ namespace GitHub.Runner.Common
Trace.Info($"WRITE: {message}");
if (!Silent)
{
if(colorCode != null)
if (colorCode != null)
{
Console.ForegroundColor = colorCode.Value;
Console.Write(message);
Console.ResetColor();
}
else {
else
{
Console.Write(message);
}
}
@@ -120,13 +121,14 @@ namespace GitHub.Runner.Common
Trace.Info($"WRITE LINE: {line}");
if (!Silent)
{
if(colorCode != null)
if (colorCode != null)
{
Console.ForegroundColor = colorCode.Value;
Console.WriteLine(line);
Console.ResetColor();
}
else {
else
{
Console.WriteLine(line);
}
}
@@ -162,9 +164,8 @@ namespace GitHub.Runner.Common
if (!Silent)
{
Console.WriteLine();
Console.ForegroundColor = ConsoleColor.White;
Console.WriteLine($"# {message}");
Console.ResetColor();
Console.WriteLine($"# {message}");
Console.WriteLine();
}
}
@@ -175,9 +176,8 @@ namespace GitHub.Runner.Common
{
Console.ForegroundColor = ConsoleColor.Green;
Console.Write("√ ");
Console.ForegroundColor = ConsoleColor.White;
Console.WriteLine(message);
Console.ResetColor();
Console.WriteLine(message);
}
}
